# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module generates a Docker environment for a job.'''
from __future__ import division
from fabric.api import sudo, run, settings
from logging import getLogger
from os.path import join as join_path
from time import sleep
import random
import os
IMPALA_HOME = '/home/dev/Impala'
CORE_PATH = '/tmp/core_files'
DEFAULT_BRANCH_NAME = 'origin/cdh5-trunk'
DEFAULT_DOCKER_IMAGE_NAME = 'impala-desktop.ca.cloudera.com:5000/ubuntu-14.04:cdh5-trunk'
DOCKER_USER_NAME = 'dev'
NUM_START_ATTEMPTS = 50
NUM_FABRIC_ATTEMPTS = 50
LOG = getLogger('ImpalaDockerEnv')
def retry(func):
'''Retry decorator.'''
def wrapper(*args, **kwargs):
attempt_num = 0
while True:
attempt_num += 1
try:
return func(*args, **kwargs)
      except Exception:
LOG.exception('{0} exception [{1}] (try: {2})'.format(
func.__name__, args[0], attempt_num))
if attempt_num == NUM_FABRIC_ATTEMPTS:
raise
sleep_time = random.randint(1, attempt_num)
sleep(sleep_time)
return wrapper
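# Example (illustrative): wrap a Fabric operation so that transient SSH
# failures are retried with a short randomized backoff, e.g.
#   output = retry(run)('hostname')
# retries up to NUM_FABRIC_ATTEMPTS times before re-raising.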
class ImpalaDockerEnv(object):
  '''Represents an Impala environment inside a Docker container. Used for starting
Impala, getting stack traces after a crash and keeping track of the ports on which SSH,
Postgres and Impala are running.
'''
def __init__(self, git_command):
self.ssh_port = None
self.impala_port = None
self.postgres_port = None
self.container_id = None
self.git_command = git_command
self.host = os.environ['TARGET_HOST']
self.host_username = os.environ['TARGET_HOST_USERNAME']
self.docker_image_name = os.environ.get(
'DOCKER_IMAGE_NAME', DEFAULT_DOCKER_IMAGE_NAME)
def stop_docker(self):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
retry(sudo)('docker stop {0}'.format(self.container_id), pty=True)
retry(sudo)('docker rm {0}'.format(self.container_id), pty=True)
def start_new_container(self):
    '''Starts a container with port forwarding for SSH, Impala and Postgres.'''
for _ in range(NUM_START_ATTEMPTS):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
port = random.randint(0, 999)
self.ssh_port = 55000 + port
self.impala_port = 56000 + port
self.postgres_port = 57000 + port
start_command = (
'docker pull {docker_image_name} '
'&& docker run -d -t -p {postgres_port}:5432 -p {ssh_port}:22 '
'-p {impala_port}:21050 {docker_image_name} /bin/docker-boot-daemon').format(
ssh_port = self.ssh_port,
impala_port = self.impala_port,
postgres_port = self.postgres_port,
docker_image_name = self.docker_image_name)
try:
self.container_id = sudo(start_command, pty=True)
except:
LOG.exception('start_new_container')
if self.container_id is not None:
break
else:
LOG.error('Container failed to start after {0} attempts'.format(NUM_START_ATTEMPTS))
def get_git_hash(self):
    '''Returns the Git hash of the current commit.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
git_hash = retry(run)('cd {IMPALA_HOME} && git rev-parse --short HEAD'.format(
IMPALA_HOME = IMPALA_HOME))
return git_hash
def run_all(self):
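    '''Creates the test configuration and loads test data inside the container.'''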
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
run_all_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/create-test-configuration.sh '
'&& {IMPALA_HOME}/testdata/bin/run-all.sh').format(
IMPALA_HOME = IMPALA_HOME)
retry(run)(run_all_command, pty=False)
def build_impala(self):
    '''Fetches and builds Impala. If git_command is not set, the fetch and build
    steps are skipped and the existing Impala checkout in the image is used.'''
if self.git_command:
build_command = (
        'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} '
        '&& docker-boot && cd {IMPALA_HOME} && {git_command} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
git_command = self.git_command,
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
else:
build_command = (
'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} '
'&& docker-boot && cd {IMPALA_HOME} '
'&& source {IMPALA_HOME}/bin/impala-config.sh').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
result = retry(run)(build_command, pty=False)
return result
def start_impala(self):
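    '''Starts an Impala cluster inside the Docker container.'''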
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
start_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/start-impala-cluster.py').format(IMPALA_HOME = IMPALA_HOME)
result = retry(run)(start_command, pty=False)
return result
def is_impala_running(self):
'''Check that exactly 3 impalads are running inside the docker instance.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
return retry(run)('ps aux | grep impalad').count('/service/impalad') == 3
def get_stack(self):
'''Finds the newest core file and extracts the stack trace from it using gdb.
'''
IMPALAD_PATH = '{IMPALA_HOME}/be/build/debug/service/impalad'.format(
IMPALA_HOME = IMPALA_HOME)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
core_file_name = retry(run)('ls {0} -t1 | head -1'.format(CORE_PATH))
LOG.info('Core File Name: {0}'.format(core_file_name))
if 'core' not in core_file_name:
return None
core_full_path = join_path(CORE_PATH, core_file_name)
stack_trace = retry(run)('gdb {0} {1} --batch --quiet --eval-command=bt'.format(
IMPALAD_PATH, core_full_path))
self.delete_core_files()
return stack_trace
def delete_core_files(self):
    '''Delete all core files. This is usually done after the stack trace has been
    extracted.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
retry(run)('rm -f {0}/core.*'.format(CORE_PATH))
def prepare(self):
'''Create a new Impala Environment. Starts a docker container and builds Impala in it.
'''
self.start_new_container()
LOG.info('Container Started')
    # Wait for the SSH service to start inside the docker instance. This
    # usually takes about a second; a fixed sleep is simple and reliable. An
    # alternative would be to poll with a timeout until SSH is up.
sleep(10)
result = self.build_impala()
LOG.info('Build Complete, Result: {0}'.format(result))
    try:
      result = self.run_all()
      LOG.info('Run All Complete, Result: {0}'.format(result))
    except Exception:
      LOG.exception('run_all failed')
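if __name__ == '__main__':
  # Minimal usage sketch (illustrative; assumes TARGET_HOST,
  # TARGET_HOST_USERNAME and DOCKER_PASSWORD are set in the environment).
  env = ImpalaDockerEnv(git_command=None)
  env.prepare()
  LOG.info('Impala running: {0}'.format(env.is_impala_running()))
  env.stop_docker()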
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_messaging.rpc import dispatcher as rpc
import six
from senlin.common import exception
from senlin.common.i18n import _
from senlin.db import api as db_api
from senlin.engine.actions import base as action_mod
from senlin.engine import cluster as cluster_mod
from senlin.engine import dispatcher
from senlin.engine import environment
from senlin.engine import service
from senlin.tests.common import base
from senlin.tests.common import utils
from senlin.tests import fakes
class ClusterTest(base.SenlinTestCase):
def setUp(self):
super(ClusterTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='cluster_test_tenant')
self.eng = service.EngineService('host-a', 'topic-a')
self.eng.init_tgm()
self.eng.dispatcher = mock.Mock()
env = environment.global_env()
env.register_profile('TestProfile', fakes.TestProfile)
env.register_policy('TestPolicy', fakes.TestPolicy)
self.profile = self.eng.profile_create(
self.ctx, 'p-test', 'TestProfile',
spec={'INT': 10, 'STR': 'string'}, perm='1111')
self.policy = self.eng.policy_create(
self.ctx, 'policy_1', 'TestPolicy',
spec={'KEY1': 'string'}, cooldown=60, level=50)
def _verify_action(self, obj, action, name, target, cause, inputs=None):
self.assertEqual(action, obj['action'])
self.assertEqual(name, obj['name'])
self.assertEqual(target, obj['target'])
self.assertEqual(cause, obj['cause'])
self.assertEqual(inputs, obj['inputs'])
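    # The action records verified above are plain dicts; an illustrative
    # CLUSTER_CREATE record looks like:
    #   {'action': 'CLUSTER_CREATE', 'name': 'cluster_create_abcd1234',
    #    'target': <cluster id>, 'cause': action_mod.CAUSE_RPC, 'inputs': None}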
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_default(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 0,
self.profile['id'])
self.assertIsNotNone(result)
self.assertEqual('c-1', result['name'])
self.assertEqual(0, result['size'])
self.assertEqual(self.profile['id'], result['profile_id'])
self.assertEqual(self.ctx.user, result['user'])
self.assertEqual('cluster_test_tenant', result['project'])
self.assertIsNone(result['parent'])
self.assertIsNone(result['timeout'])
self.assertIsNone(result['tags'])
action_id = result['action']
action = db_api.action_get(self.ctx, result['action'])
self.assertIsNotNone(action)
self._verify_action(action, 'CLUSTER_CREATE',
'cluster_create_%s' % result['id'][:8],
result['id'],
cause=action_mod.CAUSE_RPC)
notify.assert_called_once_with(self.ctx,
self.eng.dispatcher.NEW_ACTION,
None, action_id=action_id)
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_with_timeout(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 0,
self.profile['id'],
timeout=120)
self.assertIsNotNone(result)
self.assertEqual('c-1', result['name'])
self.assertEqual(120, result['timeout'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_create,
self.ctx, 'c-1', 0,
self.profile['id'],
timeout='N/A')
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_with_size(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 2,
self.profile['id'])
self.assertIsNotNone(result)
self.assertEqual('c-1', result['name'])
self.assertEqual(2, result['size'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_create,
self.ctx, 'c-1', 'Big',
self.profile['id'])
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_with_parent(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 2,
self.profile['id'],
parent='fake id')
self.assertIsNotNone(result)
self.assertEqual('c-1', result['name'])
self.assertEqual('fake id', result['parent'])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_with_tags(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 2,
self.profile['id'],
tags={'k': 'v'})
self.assertIsNotNone(result)
self.assertEqual('c-1', result['name'])
self.assertEqual({'k': 'v'}, result['tags'])
def test_cluster_create_profile_not_found(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_create,
self.ctx, 'c-1', 0, 'Bogus')
self.assertEqual(exception.ProfileNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_create_with_profile_name_or_short_id(self, notify):
result = self.eng.cluster_create(self.ctx, 'c-1', 0,
self.profile['id'][:8])
self.assertIsNotNone(result)
self.assertEqual(self.profile['id'], result['profile_id'])
self.eng.cluster_create(self.ctx, 'c-2', 0, self.profile['name'])
self.assertIsNotNone(result)
self.assertEqual(self.profile['id'], result['profile_id'])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_get(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0,
self.profile['id'])
for identity in [c['id'], c['id'][:6], 'c-1']:
result = self.eng.cluster_get(self.ctx, identity)
self.assertIsInstance(result, dict)
self.assertEqual(c['id'], result['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_get, self.ctx, 'Bogus')
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list(self, notify):
c1 = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
c2 = self.eng.cluster_create(self.ctx, 'c-2', 0, self.profile['id'])
result = self.eng.cluster_list(self.ctx)
self.assertIsInstance(result, list)
names = [c['name'] for c in result]
ids = [c['id'] for c in result]
self.assertIn(c1['name'], names[0])
self.assertIn(c2['name'], names[1])
self.assertIn(c1['id'], ids[0])
self.assertIn(c2['id'], ids[1])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_with_limit_marker(self, notify):
c1 = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
c2 = self.eng.cluster_create(self.ctx, 'c-2', 0, self.profile['id'])
result = self.eng.cluster_list(self.ctx, limit=0)
self.assertEqual(0, len(result))
result = self.eng.cluster_list(self.ctx, limit=1)
self.assertEqual(1, len(result))
result = self.eng.cluster_list(self.ctx, limit=2)
self.assertEqual(2, len(result))
result = self.eng.cluster_list(self.ctx, limit=3)
self.assertEqual(2, len(result))
result = self.eng.cluster_list(self.ctx, marker=c1['id'])
self.assertEqual(1, len(result))
result = self.eng.cluster_list(self.ctx, marker=c2['id'])
self.assertEqual(0, len(result))
self.eng.cluster_create(self.ctx, 'c-3', 0, self.profile['id'])
result = self.eng.cluster_list(self.ctx, limit=1, marker=c1['id'])
self.assertEqual(1, len(result))
result = self.eng.cluster_list(self.ctx, limit=2, marker=c1['id'])
self.assertEqual(2, len(result))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_with_sort_keys(self, notify):
c1 = self.eng.cluster_create(self.ctx, 'CC', 0, self.profile['id'])
c2 = self.eng.cluster_create(self.ctx, 'BB', 0, self.profile['id'])
# default by created_time
result = self.eng.cluster_list(self.ctx)
self.assertEqual(c1['id'], result[0]['id'])
self.assertEqual(c2['id'], result[1]['id'])
# use name for sorting
result = self.eng.cluster_list(self.ctx, sort_keys=['name'])
self.assertEqual(c2['id'], result[0]['id'])
self.assertEqual(c1['id'], result[1]['id'])
# unknown keys will be ignored
result = self.eng.cluster_list(self.ctx, sort_keys=['duang'])
self.assertIsNotNone(result)
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_with_sort_dir(self, notify):
c1 = self.eng.cluster_create(self.ctx, 'BB', 0, self.profile['id'])
c2 = self.eng.cluster_create(self.ctx, 'AA', 0, self.profile['id'])
c3 = self.eng.cluster_create(self.ctx, 'CC', 0, self.profile['id'])
# default by created_time, ascending
result = self.eng.cluster_list(self.ctx)
self.assertEqual(c1['id'], result[0]['id'])
self.assertEqual(c2['id'], result[1]['id'])
# sort by created_time, descending
result = self.eng.cluster_list(self.ctx, sort_dir='desc')
self.assertEqual(c3['id'], result[0]['id'])
self.assertEqual(c2['id'], result[1]['id'])
# use name for sorting, descending
result = self.eng.cluster_list(self.ctx, sort_keys=['name'],
sort_dir='desc')
self.assertEqual(c3['id'], result[0]['id'])
self.assertEqual(c1['id'], result[1]['id'])
        # invalid sort direction raises ValueError
ex = self.assertRaises(ValueError,
self.eng.cluster_list, self.ctx,
sort_dir='Bogus')
self.assertEqual("Unknown sort direction, must be "
"'desc' or 'asc'", six.text_type(ex))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_show_deleted(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
result = self.eng.cluster_list(self.ctx)
self.assertEqual(1, len(result))
self.assertEqual(c['id'], result[0]['id'])
db_api.cluster_delete(self.ctx, c['id'])
result = self.eng.cluster_list(self.ctx)
self.assertEqual(0, len(result))
result = self.eng.cluster_list(self.ctx, show_deleted=True)
self.assertEqual(1, len(result))
self.assertEqual(c['id'], result[0]['id'])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_show_nested(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'],
parent='other-cluster')
result = self.eng.cluster_list(self.ctx)
self.assertEqual(0, len(result))
result = self.eng.cluster_list(self.ctx, show_nested=True)
self.assertEqual(1, len(result))
self.assertEqual(c['id'], result[0]['id'])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_list_with_filters(self, notify):
self.eng.cluster_create(self.ctx, 'BB', 0, self.profile['id'])
self.eng.cluster_create(self.ctx, 'AA', 0, self.profile['id'])
self.eng.cluster_create(self.ctx, 'CC', 0, self.profile['id'])
result = self.eng.cluster_list(self.ctx, filters={'name': 'BB'})
self.assertEqual(1, len(result))
self.assertEqual('BB', result[0]['name'])
result = self.eng.cluster_list(self.ctx, filters={'name': 'DD'})
self.assertEqual(0, len(result))
def test_cluster_list_bad_param(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_list, self.ctx, limit='no')
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_list, self.ctx,
show_deleted='no')
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_list, self.ctx,
show_nested='no')
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
def test_cluster_list_empty(self):
result = self.eng.cluster_list(self.ctx)
self.assertIsInstance(result, list)
self.assertEqual(0, len(result))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_find(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
result = self.eng.cluster_find(self.ctx, cid)
self.assertIsNotNone(result)
# short id
result = self.eng.cluster_find(self.ctx, cid[:5])
self.assertIsNotNone(result)
# name
result = self.eng.cluster_find(self.ctx, 'c-1')
self.assertIsNotNone(result)
# others
self.assertRaises(exception.ClusterNotFound,
self.eng.cluster_find, self.ctx, 'Bogus')
@mock.patch.object(dispatcher, 'notify')
def test_cluster_find_show_deleted(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
db_api.cluster_delete(self.ctx, cid)
for identity in [cid, cid[:6], 'c-1']:
self.assertRaises(exception.ClusterNotFound,
self.eng.cluster_find, self.ctx, identity)
        # short-id- and name-based finding does not support show_deleted
        for identity in [cid[:6], 'c-1']:
self.assertRaises(exception.ClusterNotFound,
self.eng.cluster_find, self.ctx, identity)
# ID based finding is okay with show_deleted
result = self.eng.cluster_find(self.ctx, cid, show_deleted=True)
self.assertIsNotNone(result)
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_simple_success(self, notify):
c1 = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c1['id']
# 1. update name
self.eng.cluster_update(self.ctx, cid, name='c-2')
c = self.eng.cluster_get(self.ctx, cid)
self.assertEqual(cid, c['id'])
self.assertEqual('c-2', c['name'])
# 2. update parent
p = self.eng.cluster_create(self.ctx, 'parent', 0, self.profile['id'])
self.eng.cluster_update(self.ctx, cid, parent=p['id'])
c = self.eng.cluster_get(self.ctx, cid)
self.assertEqual(cid, c['id'])
self.assertEqual(p['id'], c['parent'])
# 3. update tags
self.eng.cluster_update(self.ctx, cid, tags={'k': 'v'})
c = self.eng.cluster_get(self.ctx, cid)
self.assertEqual(cid, c['id'])
self.assertEqual({'k': 'v'}, c['tags'])
# 4. update timeout
self.eng.cluster_update(self.ctx, cid, timeout=119)
c = self.eng.cluster_get(self.ctx, cid)
self.assertEqual(cid, c['id'])
self.assertEqual(119, c['timeout'])
def test_cluster_update_cluster_not_found(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update, self.ctx, 'Bogus')
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_cluster_bad_status(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cluster = cluster_mod.Cluster.load(self.ctx, c['id'])
cluster.set_status(self.ctx, cluster.DELETED, reason='test')
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update, self.ctx, c['id'],
name='new name')
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_parent_not_found(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update, self.ctx, c['id'],
parent='Bogus')
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_timeout_not_integer(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update, self.ctx, c['id'],
timeout='Long')
self.assertEqual(exception.InvalidParameter, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_cluster_status_error(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cluster = cluster_mod.Cluster.load(self.ctx, c['id'])
cluster.set_status(self.ctx, cluster.ERROR, reason='test')
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update, self.ctx, c['id'],
profile_id='good_profile')
self.assertEqual(exception.NotSupported, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_update_to_same_profile(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
self.eng.cluster_update(self.ctx, c['id'],
profile_id=self.profile['id'])
result = self.eng.cluster_get(self.ctx, c['id'])
self.assertEqual(c['id'], result['id'])
self.assertEqual(c['profile_id'], result['profile_id'])
        # notify is called only once (for the create), because updating a
        # cluster to its current profile does not dispatch any new action
        self.assertEqual(1, notify.call_count)
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_update_to_diff_profile_type(self, notify):
# Register a different profile
env = environment.global_env()
env.register_profile('DiffProfileType', fakes.TestProfile)
new_profile = self.eng.profile_create(
self.ctx, 'p-test', 'DiffProfileType',
spec={'INT': 10, 'STR': 'string'}, perm='1111')
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update,
self.ctx, c['id'], profile_id=new_profile['id'])
self.assertEqual(exception.ProfileTypeNotMatch, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_profile_not_found(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_update,
self.ctx, c['id'], profile_id='Bogus')
self.assertEqual(exception.ProfileNotFound, ex.exc_info[0])
@mock.patch.object(dispatcher, 'notify')
def test_cluster_update_profile_normal(self, notify):
new_profile = self.eng.profile_create(
self.ctx, 'p-new', 'TestProfile',
spec={'INT': 20, 'STR': 'string new'})
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
self.eng.cluster_update(self.ctx, c['id'],
profile_id=new_profile['id'])
# TODO(anyone): uncomment the following lines when cluster-update
# is implemented
# action_id = result['action']
# action = self.eng.action_get(self.ctx, action_id)
# self._verify_action(action, 'CLUSTER_UPDATE',
# 'cluster_update_%s' % c['id'][:8],
# result['id'],
# cause=action_mod.CAUSE_RPC)
# notify.assert_called_once_with(self.ctx,
# self.eng.dispatcher.NEW_ACTION,
# None, action_id=action_id)
@mock.patch.object(dispatcher, 'notify')
def test_cluster_delete(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
result = self.eng.cluster_delete(self.ctx, cid)
self.assertIsNotNone(result)
# verify action is fired
action_id = result['action']
action = self.eng.action_get(self.ctx, action_id)
self._verify_action(action, 'CLUSTER_DELETE',
'cluster_delete_%s' % c['id'][:8],
c['id'],
cause=action_mod.CAUSE_RPC)
expected_call = mock.call(self.ctx,
self.eng.dispatcher.NEW_ACTION,
None, action_id=mock.ANY)
# two calls: one for create, the other for delete
notify.assert_has_calls([expected_call] * 2)
def test_cluster_delete_not_found(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_delete, self.ctx, 'Bogus')
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
self.assertEqual('The cluster (Bogus) could not be found.',
six.text_type(ex.exc_info[1]))
def _prepare_nodes(self, ctx, count=3, profile_id=None, **kwargs):
'''Prepare nodes for add or delete.'''
nodes = []
for i in range(count):
values = {
'name': 'test_node_name',
'physical_id': 'fake-phy-id-%s' % (i + 1),
'cluster_id': None,
'profile_id': profile_id or self.profile['id'],
'project': ctx.tenant_id,
'index': i + 1,
'role': None,
'created_time': None,
'updated_time': None,
'deleted_time': None,
'status': 'ACTIVE',
'status_reason': 'create complete',
'tags': {'foo': '123'},
'data': {'key1': 'value1'},
}
values.update(kwargs)
db_node = db_api.node_create(ctx, values)
nodes.append(six.text_type(db_node.id))
return nodes
@mock.patch.object(dispatcher, 'notify')
def test_cluster_add_nodes(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
nodes = self._prepare_nodes(self.ctx)
result = self.eng.cluster_add_nodes(self.ctx, cid, nodes)
# verify action is fired
action_id = result['action']
action = self.eng.action_get(self.ctx, action_id)
self._verify_action(action, 'CLUSTER_ADD_NODES',
'cluster_add_nodes_%s' % cid[:8],
cid, cause=action_mod.CAUSE_RPC,
inputs={'nodes': nodes})
expected_call = mock.call(self.ctx,
self.eng.dispatcher.NEW_ACTION,
None, action_id=mock.ANY)
# two calls: one for create, the other for adding nodes
notify.assert_has_calls([expected_call] * 2)
    @mock.patch.object(dispatcher, 'notify')
    def test_cluster_add_nodes_cluster_not_found(self, notify):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_add_nodes,
self.ctx, 'Bogus', ['n1', 'n2'])
self.assertEqual(exception.ClusterNotFound, ex.exc_info[0])
self.assertEqual('The cluster (Bogus) could not be found.',
six.text_type(ex.exc_info[1]))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_add_nodes_empty_list(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_add_nodes,
self.ctx, cid, [])
self.assertEqual(exception.SenlinBadRequest, ex.exc_info[0])
self.assertEqual('The request is malformed: No nodes to add: []',
six.text_type(ex.exc_info[1]))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_add_nodes_node_not_found(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_add_nodes,
self.ctx, cid, ['Bogus'])
self.assertEqual(exception.SenlinBadRequest, ex.exc_info[0])
self.assertEqual("The request is malformed: Nodes not found: "
"['Bogus']", six.text_type(ex.exc_info[1]))
@mock.patch.object(dispatcher, 'notify')
def test_cluster_add_nodes_node_not_active(self, notify):
c = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
cid = c['id']
nodes = self._prepare_nodes(self.ctx, count=1, status='ERROR')
ex = self.assertRaises(rpc.ExpectedException,
self.eng.cluster_add_nodes,
self.ctx, cid, nodes)
self.assertEqual(exception.SenlinBadRequest, ex.exc_info[0])
msg = _("Nodes are not ACTIVE: %s") % nodes
self.assertEqual(_("The request is malformed: %(msg)s") % {'msg': msg},
six.text_type(ex.exc_info[1]))
from __future__ import division
from itertools import product
import numpy as np
from mbuild.coordinate_transform import (equivalence_transform, translate,
rotate_around_x, rotate_around_y,
rotate_around_z)
from mbuild.utils.validation import assert_port_exists
from mbuild import clone
__all__ = ['Pattern', 'DiskPattern', 'SpherePattern', 'Random2DPattern',
'Random3DPattern', 'Grid2DPattern', 'Grid3DPattern']
class Pattern(object):
def __init__(self, points, orientations=None):
self.points = points
if orientations is None:
orientations = dict()
self.orientations = orientations # TODO: implement
def __len__(self):
return len(self.points)
def __getitem__(self, item):
return self.points[item]
def scale(self, scalar):
self.points *= scalar
self._adjust_ports()
def _adjust_ports(self):
for orientation, ports in self.orientations.items():
for port, point in zip(ports, self.points):
translate(port, point)
def apply(self, compound, orientation='', compound_port=''):
"""Arrange copies of a Compound as specified by the Pattern.
Parameters
----------
compound
orientation
Returns
-------
"""
compounds = list()
if self.orientations.get(orientation):
for port in self.orientations[orientation]:
new_compound = clone(compound)
new_port = new_compound.labels[compound_port]
equivalence_transform(new_compound, new_port['up'], port['up'])
compounds.append(new_compound)
else:
for point in self.points:
new_compound = clone(compound)
translate(new_compound, point)
compounds.append(new_compound)
return compounds
def apply_to_compound(self, guest, guest_port_name='down', host=None,
backfill=None, backfill_port_name='up'):
"""Attach copies of a guest Compound to Ports on a host Compound.
Parameters
----------
guest
guest_port_name
host
backfill
backfill_port_name
Returns
-------
"""
n_ports = len(host.available_ports())
assert n_ports >= self.points.shape[0], "Not enough ports for pattern."
assert_port_exists(guest_port_name, guest)
box = host.boundingbox
pattern = self.points * box.lengths + box.mins
port_positions = np.empty(shape=(n_ports, 3))
port_list = list()
for port_idx, port in enumerate(host.available_ports()):
port_positions[port_idx, :] = port['up']['middle'].pos
port_list.append(port)
used_ports = set() # Keep track of used ports for backfilling.
guests = []
for point in pattern:
closest_point_idx = np.argmin(host.min_periodic_distance(point, port_positions))
closest_port = port_list[closest_point_idx]
used_ports.add(closest_port)
# Attach the guest to the closest port.
new_guest = clone(guest)
equivalence_transform(new_guest, new_guest.labels[guest_port_name], closest_port)
guests.append(new_guest)
# Move the port as far away as possible (simpler than removing it).
# There may well be a more elegant/efficient way of doing this.
port_positions[closest_point_idx, :] = np.array([np.inf, np.inf, np.inf])
backfills = []
if backfill:
assert_port_exists(backfill_port_name, backfill)
# Attach the backfilling Compound to unused ports.
for port in port_list:
if port not in used_ports:
new_backfill = clone(backfill)
equivalence_transform(
new_backfill, new_backfill.labels[backfill_port_name], port)
backfills.append(new_backfill)
return guests, backfills
class Random2DPattern(Pattern):
def __init__(self, n, orientations=None):
points = np.random.random((n, 3))
points[:, 2] = 0
super(Random2DPattern, self).__init__(points=points, orientations=orientations)
class Random3DPattern(Pattern):
def __init__(self, n, orientations=None):
points = np.random.random((n, 3))
super(Random3DPattern, self).__init__(points=points, orientations=orientations)
class Grid2DPattern(Pattern):
def __init__(self, n, m, orientations=None):
points = np.zeros(shape=(n*m, 3), dtype=float)
for i, j in product(range(n), range(m)):
points[i*m + j, 0] = i / n
points[i*m + j, 1] = j / m
super(Grid2DPattern, self).__init__(points=points, orientations=orientations)
class Grid3DPattern(Pattern):
def __init__(self, n, m, l, orientations=None):
points = np.zeros(shape=(n*m*l, 3), dtype=float)
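        # Row-major flattening of the (i, j, k) lattice index: point
        # i*m*l + j*l + k gets fractional coordinates (i/n, j/m, k/l).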
for i, j, k in product(range(n), range(m), range(l)):
points[i*m*l + j*l + k, 0] = i / n
points[i*m*l + j*l + k, 1] = j / m
points[i*m*l + j*l + k, 2] = k / l
super(Grid3DPattern, self).__init__(points=points, orientations=orientations)
class SpherePattern(Pattern):
"""Generate N evenly distributed points on the unit sphere.
    Sphere is centered at the origin. Algorithm based on the 'Golden Spiral'.
Code by Chris Colbert from the numpy-discussion list:
http://mail.scipy.org/pipermail/numpy-discussion/2009-July/043811.html
"""
def __init__(self, n):
phi = (1 + np.sqrt(5)) / 2 # the golden ratio
long_incr = 2*np.pi / phi # how much to increment the longitude
dz = 2.0 / float(n) # a unit sphere has diameter 2
bands = np.arange(n) # each band will have one point placed on it
z = bands * dz - 1 + (dz/2) # the height z of each band/point
r = np.sqrt(1 - z*z) # project onto xy-plane
az = bands * long_incr # azimuthal angle of point modulo 2 pi
x = r * np.cos(az)
y = r * np.sin(az)
points = np.column_stack((x, y, z))
from mbuild.port import Port
ports = list()
for point in points:
port = Port()
ports.append(port)
# Make the top of the port point toward the positive x axis.
rotate_around_z(port, -np.pi/2)
# Raise up (or down) the top of the port in the z direction.
rotate_around_y(port, -np.arcsin(point[2]))
# Rotate the Port along the z axis.
rotate_around_z(port, np.arctan2(point[1], point[0]))
# Move the Port a bit away from the surface of the Sphere.
#translate(port, point + 0.07)
super(SpherePattern, self).__init__(points=points,
orientations={'normal': ports})
class DiskPattern(Pattern):
""" """
def __init__(self, n, orientations=None):
radius = np.sqrt(np.arange(n) / float(n))
golden_angle = np.pi * (3 - np.sqrt(5))
theta = golden_angle * np.arange(n)
points = np.zeros((n, 3))
points[:, 0] = np.cos(theta)
points[:, 1] = np.sin(theta)
points *= radius.reshape((n, 1))
super(DiskPattern, self).__init__(points=points, orientations=orientations)
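if __name__ == '__main__':
    # Minimal usage sketch: lay out an 8x8 grid of points in the unit square
    # and scale it up to a 10x10 area. (Illustrative only.)
    pattern = Grid2DPattern(8, 8)
    pattern.scale(10)
    print(len(pattern), pattern[0])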
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from django.conf import settings
from json import loads as to_json
from openstack_dashboard import api
from openstack_dashboard.api.base import Quota
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from novaclient import exceptions
class NovaRestTestCase(test.TestCase):
#
# Keypairs
#
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "one"}, {"id": "two"}]})
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.json,
{"name": "Ni!", "public_key": "sekrit"})
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.json,
{"name": "Ni!", "public_key": "hi"})
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
def test_keypair_create_and_download(self):
self._test_keypair_create_and_download(False)
def test_keypair_recreate_and_download(self):
self._test_keypair_create_and_download(True)
@mock.patch.object(nova.api, 'nova')
def _test_keypair_create_and_download(self, recreate_keypair, nc):
params = {}
if recreate_keypair:
params = {'regenerate': 'true'}
request = self.mock_rest_request(GET=params)
keypair_create_response = mock.Mock()
keypair_create_response.private_key = "private key content"
nc.keypair_create.return_value = keypair_create_response
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypair().get(request, "Ni!")
if recreate_keypair:
nc.keypair_delete.assert_called_once_with(request, 'Ni!')
else:
nc.keypair_delete.assert_not_called()
nc.keypair_create.assert_called_once_with(request, 'Ni!')
self.assertStatusCode(response, 200)
self.assertEqual(
response['Content-Disposition'],
'attachment; filename=ni.pem')
self.assertEqual(
response.content.decode('utf-8'),
"private key content")
self.assertEqual(response['Content-Length'], '19')
@mock.patch.object(nova.api, 'nova')
def test_keypair_fail_to_create_because_already_exists(self, nc):
request = self.mock_rest_request(GET={})
conflict_exception = exceptions.Conflict(409, 'keypair exists!')
nc.keypair_create.side_effect = conflict_exception
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypair().get(request, "Ni!")
self.assertEqual(response.status_code, 409)
@mock.patch.object(nova.api, 'nova')
def test_keypair_fail_to_create(self, nc):
request = self.mock_rest_request(GET={})
surprise_exception = exceptions.ClientException(501, 'Boom!')
nc.keypair_create.side_effect = surprise_exception
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypair().get(request, "Ni!")
self.assertEqual(response.status_code, 500)
#
# Availability Zones
#
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "one"}, {"id": "two"}]})
nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.json, {"id": "one"})
#
# Servers
#
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.json,
"missing required parameter 'source_id'")
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.json, {"id": "server123"})
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_list(self, nc):
request = self.mock_rest_request()
nc.server_list.return_value = ([
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
], False)
response = nova.Servers().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{'items': [{'id': 'one'}, {'id': 'two'}]})
nc.server_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
#
# Server Groups
#
@mock.patch.object(nova.api, 'nova')
def test_server_group_list(self, nc):
request = self.mock_rest_request()
nc.server_group_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.ServerGroups().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{'items': [{'id': '1'}, {'id': '2'}]})
nc.server_group_list.assert_called_once_with(request)
#
# Server Metadata
#
@mock.patch.object(nova.api, 'nova')
def test_server_get_metadata(self, nc):
request = self.mock_rest_request()
meta = {'foo': 'bar'}
nc.server_get.return_value.to_dict.return_value.get.return_value = meta
response = nova.ServerMetadata().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
@mock.patch.object(nova.api, 'nova')
def test_server_edit_metadata(self, nc):
request = self.mock_rest_request(
body='{"updated": {"a": "1", "b": "2"}, "removed": ["c", "d"]}'
)
response = nova.ServerMetadata().patch(request, '1')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
nc.server_metadata_update.assert_called_once_with(
request, '1', {'a': '1', 'b': '2'}
)
nc.server_metadata_delete.assert_called_once_with(
request, '1', ['c', 'd']
)
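    # Note: the {"updated": ..., "removed": [...]} PATCH body convention
    # exercised above is also used by the flavor and aggregate extra-specs
    # tests below.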
#
# Extensions
#
@mock.patch.object(nova.api, 'nova')
@mock.patch.object(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', ['baz'])
    def test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "foo"}, {"name": "bar"}]})
nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
@mock.patch.object(nova.api, 'nova')
def test_flavor_get_single_with_access_list(self, nc):
request = self.mock_rest_request(GET={'get_access_list': 'tRuE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
nc.flavor_get.return_value.is_public = False
nc.flavor_access_list.return_value = [
mock.Mock(**{'tenant_id': '11'}),
mock.Mock(**{'tenant_id': '22'}),
]
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
self.assertEqual(to_json(response.content.decode('utf-8')),
to_json('{"access-list": ["11", "22"], "name": "1"}'))
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=False)
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.json, {"extras": {}, "name": "1"})
else:
self.assertEqual(response.json, {"name": "1"})
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def test_flavor_get_single_with_swap_set_to_empty(self, nc):
request = self.mock_rest_request()
nc.flavor_get.return_value\
.to_dict.return_value = {'name': '1', 'swap': ''}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
self.assertEqual(to_json(response.content.decode('utf-8')),
to_json('{"name": "1", "swap": 0}'))
@mock.patch.object(nova.api, 'nova')
def test_flavor_delete(self, nc):
request = self.mock_rest_request()
nova.Flavor().delete(request, "1")
nc.flavor_delete.assert_called_once_with(request, "1")
@mock.patch.object(nova.api, 'nova')
def test_flavor_create(self, nc):
flavor_req_data = '{"name": "flavor", ' \
'"ram": 12, ' \
'"vcpus": 1, ' \
'"disk": 2, ' \
'"OS-FLV-EXT-DATA:ephemeral": 3, ' \
'"swap": 4, ' \
'"id": "123"' \
'}'
nc.flavor_create.return_value = mock.Mock(**{
'id': '123',
'to_dict.return_value': {'id': '123', 'name': 'flavor'}
})
flavor_data = {'name': 'flavor',
'memory': 12,
'vcpu': 1,
'disk': 2,
'ephemeral': 3,
'swap': 4,
'flavorid': '123',
'is_public': True}
request = self.mock_rest_request(body=flavor_req_data)
response = nova.Flavors().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'], '/api/nova/flavors/123')
nc.flavor_create.assert_called_once_with(request, **flavor_data)
@mock.patch.object(nova.api, 'nova')
def test_flavor_create_with_access_list(self, nc):
flavor_req_data = '{"name": "flavor", ' \
'"ram": 12, ' \
'"vcpus": 1, ' \
'"disk": 2, ' \
'"OS-FLV-EXT-DATA:ephemeral": 3, ' \
'"swap": 4, ' \
'"id": "123", ' \
'"flavor_access": [{"id":"1", "name":"test"}]' \
'}'
nc.flavor_create.return_value = mock.Mock(**{
'id': '1234',
'to_dict.return_value': {'id': '1234', 'name': 'flavor'}
})
flavor_data = {'name': 'flavor',
'memory': 12,
'vcpu': 1,
'disk': 2,
'ephemeral': 3,
'swap': 4,
'flavorid': '123',
'is_public': False}
request = self.mock_rest_request(body=flavor_req_data)
response = nova.Flavors().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'], '/api/nova/flavors/1234')
nc.flavor_create.assert_called_once_with(request, **flavor_data)
nc.add_tenant_to_flavor.assert_called_once_with(request, '1234', '1')
@mock.patch.object(nova.api, 'nova')
def test_flavor_update(self, nc):
flavor_req_data = '{"name": "flavor", ' \
'"ram": 12, ' \
'"vcpus": 1, ' \
'"disk": 2, ' \
'"OS-FLV-EXT-DATA:ephemeral": 3, ' \
'"swap": 4' \
'}'
nc.flavor_create.return_value = mock.Mock(**{
'id': '123',
'to_dict.return_value': {'id': '123', 'name': 'flavor'}
})
flavor_data = {'name': 'flavor',
'memory': 12,
'vcpu': 1,
'disk': 2,
'ephemeral': 3,
'swap': 4,
'flavorid': '123',
'is_public': True}
request = self.mock_rest_request(body=flavor_req_data)
response = nova.Flavor().patch(request, '123')
self.assertStatusCode(response, 204)
nc.flavor_delete.assert_called_once_with(request, '123')
nc.flavor_create.assert_called_once_with(request, **flavor_data)
@mock.patch.object(nova.api, 'nova')
def test_flavor_update_with_extras(self, nc):
flavor_req_data = '{"name": "flavor", ' \
'"ram": 12, ' \
'"vcpus": 1, ' \
'"disk": 2, ' \
'"OS-FLV-EXT-DATA:ephemeral": 3, ' \
'"swap": 4' \
'}'
extra_dict = mock.Mock()
nc.flavor_get_extras.return_value = extra_dict
nc.flavor_create.return_value = mock.Mock(**{
'id': '1234',
'to_dict.return_value': {'id': '1234', 'name': 'flavor'}
})
flavor_data = {'name': 'flavor',
'memory': 12,
'vcpu': 1,
'disk': 2,
'ephemeral': 3,
'swap': 4,
'flavorid': '123',
'is_public': True}
request = self.mock_rest_request(body=flavor_req_data)
response = nova.Flavor().patch(request, '123')
self.assertStatusCode(response, 204)
nc.flavor_delete.assert_called_once_with(request, '123')
nc.flavor_create.assert_called_once_with(request, **flavor_data)
nc.flavor_get_extras.assert_called_once_with(request, '123', raw=True)
nc.flavor_extra_set.assert_called_once_with(request, '1234',
extra_dict)
@mock.patch.object(nova.api, 'nova')
def test_flavor_update_with_access_list(self, nc):
flavor_req_data = '{"name": "flavor", ' \
'"ram": 12, ' \
'"vcpus": 1, ' \
'"disk": 2, ' \
'"OS-FLV-EXT-DATA:ephemeral": 3, ' \
'"swap": 4, ' \
'"flavor_access": [{"id":"1", "name":"test"}]' \
'}'
nc.flavor_create.return_value = mock.Mock(**{
'id': '1234',
'to_dict.return_value': {'id': '1234', 'name': 'flavor'}
})
flavor_data = {'name': 'flavor',
'memory': 12,
'vcpu': 1,
'disk': 2,
'ephemeral': 3,
'swap': 4,
'flavorid': '123',
'is_public': False}
request = self.mock_rest_request(body=flavor_req_data)
response = nova.Flavor().patch(request, '123')
self.assertStatusCode(response, 204)
nc.flavor_delete.assert_called_once_with(request, '123')
nc.flavor_create.assert_called_once_with(request, **flavor_data)
nc.add_tenant_to_flavor.assert_called_once_with(request, '1234', '1')
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
if is_public:
request = self.mock_rest_request(GET={'is_public': 'tRuE'})
elif is_public is None:
request = self.mock_rest_request(GET={})
else:
request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "1"}, {"id": "2"}]})
nc.flavor_list.assert_called_once_with(request, is_public=is_public,
get_extras=False)
def test_flavor_list_private(self):
self._test_flavor_list_public(is_public=False)
def test_flavor_list_public(self):
self._test_flavor_list_public(is_public=True)
def test_flavor_list_public_none(self):
self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_extras(self, nc, get_extras=None):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request(GET={})
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.json,
{"items": [{"extras": {}, "id": "1"},
{"extras": {}, "id": "2"}]})
else:
self.assertEqual(response.json,
{"items": [{"id": "1"}, {"id": "2"}]})
nc.flavor_list.assert_called_once_with(request, is_public=None,
get_extras=get_extras)
def test_flavor_list_extras_no(self):
self._test_flavor_list_extras(get_extras=False)
def test_flavor_list_extras_yes(self):
self._test_flavor_list_extras(get_extras=True)
def test_flavor_list_extras_absent(self):
self._test_flavor_list_extras(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def test_flavor_get_extra_specs(self, nc):
request = self.mock_rest_request()
nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'}
response = nova.FlavorExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
@mock.patch.object(nova.api, 'nova')
def test_flavor_edit_extra_specs(self, nc):
request = self.mock_rest_request(
body='{"updated": {"a": "1", "b": "2"}, "removed": ["c", "d"]}'
)
response = nova.FlavorExtraSpecs().patch(request, '1')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
nc.flavor_extra_set.assert_called_once_with(
request, '1', {'a': '1', 'b': '2'}
)
nc.flavor_extra_delete.assert_called_once_with(
request, '1', ['c', 'd']
)
@mock.patch.object(nova.api, 'nova')
def test_aggregate_get_extra_specs(self, nc):
request = self.mock_rest_request()
nc.aggregate_get.return_value.metadata = {'a': '1', 'b': '2'}
response = nova.AggregateExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"a": "1", "b": "2"})
nc.aggregate_get.assert_called_once_with(request, "1")
@mock.patch.object(nova.api, 'nova')
def test_aggregate_edit_extra_specs(self, nc):
request = self.mock_rest_request(
body='{"updated": {"a": "1", "b": "2"}, "removed": ["c", "d"]}'
)
response = nova.AggregateExtraSpecs().patch(request, '1')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
nc.aggregate_set_metadata.assert_called_once_with(
request, '1', {'a': '1', 'b': '2', 'c': None, 'd': None}
)
#
# Services
#
@test.create_stubs({api.base: ('is_service_enabled',)})
@mock.patch.object(nova.api, 'nova')
def test_services_get(self, nc):
request = self.mock_rest_request(GET={})
nc.service_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}})
]
api.base.is_service_enabled(request, 'compute').AndReturn(True)
self.mox.ReplayAll()
response = nova.Services().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content.decode('utf-8'),
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.service_list.assert_called_once_with(request)
@test.create_stubs({api.base: ('is_service_enabled',)})
def test_services_get_disabled(self):
request = self.mock_rest_request(GET={})
api.base.is_service_enabled(request, 'compute').AndReturn(False)
self.mox.ReplayAll()
response = nova.Services().get(request)
self.assertStatusCode(response, 501)
@test.create_stubs({api.base: ('is_service_enabled',)})
@test.create_stubs({quotas: ('get_disabled_quotas',)})
@mock.patch.object(nova.api, 'nova')
def test_quota_sets_defaults_get(self, nc):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
api.base.is_service_enabled(request, 'compute').AndReturn(True)
quotas.get_disabled_quotas(request).AndReturn(['floating_ips'])
nc.default_quota_get.return_value = [
Quota('metadata_items', 100),
Quota('floating_ips', 1),
Quota('q2', 101)
]
self.mox.ReplayAll()
response = nova.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [
{"limit": 100,
"display_name": "Metadata Items",
"name": "metadata_items"},
{"limit": 101,
"display_name": "Q2",
"name": "q2"}
]})
nc.default_quota_get.assert_called_once_with(request,
request.user.tenant_id)
@test.create_stubs({api.base: ('is_service_enabled',)})
@mock.patch.object(nova.api, 'nova')
def test_quota_sets_defaults_get_when_service_is_disabled(self, nc):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
api.base.is_service_enabled(request, 'compute').AndReturn(False)
self.mox.ReplayAll()
response = nova.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'),
'"Service Nova is disabled."')
nc.default_quota_get.assert_not_called()
@test.create_stubs({api.base: ('is_service_enabled',)})
@test.create_stubs({quotas: ('get_disabled_quotas',)})
@mock.patch.object(nova.api, 'nova')
def test_quota_sets_defaults_patch(self, nc):
request = self.mock_rest_request(body='''
{"key_pairs": "15", "metadata_items": "5000",
"cores": "10", "instances": "20", "floating_ips": 10,
"injected_file_content_bytes": "15",
"injected_file_path_bytes": "5000",
"injected_files": "5", "ram": "10", "gigabytes": "5"}
''')
api.base.is_service_enabled(request, 'compute').AndReturn(True)
quotas.get_disabled_quotas(request).AndReturn(['floating_ips'])
self.mox.ReplayAll()
response = nova.DefaultQuotaSets().patch(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content.decode('utf-8'), '')
nc.default_quota_update.assert_called_once_with(
request, key_pairs='15',
metadata_items='5000', cores='10',
instances='20', injected_file_content_bytes='15',
injected_file_path_bytes='5000',
injected_files='5', ram='10')
@test.create_stubs({api.base: ('is_service_enabled',)})
@mock.patch.object(nova.api, 'nova')
def test_quota_sets_defaults_patch_when_service_is_disabled(self, nc):
request = self.mock_rest_request(body='''
{"key_pairs": "15", "metadata_items": "5000",
"cores": "10", "instances": "20", "floating_ips": 10,
"injected_file_content_bytes": "15",
"injected_file_path_bytes": "5000",
"injected_files": "5", "ram": "10", "gigabytes": "5"}
''')
api.base.is_service_enabled(request, 'compute').AndReturn(False)
self.mox.ReplayAll()
response = nova.DefaultQuotaSets().patch(request)
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'),
'"Service Nova is disabled."')
nc.default_quota_update.assert_not_called()
@mock.patch.object(nova, 'quotas')
@mock.patch.object(nova.api, 'nova')
def test_editable_quotas_get(self, nc, qc):
disabled_quotas = ['floating_ips', 'fixed_ips',
'security_groups', 'security_group_rules']
editable_quotas = ['cores', 'volumes', 'network', 'fixed_ips']
qc.get_disabled_quotas.return_value = disabled_quotas
qc.QUOTA_FIELDS = editable_quotas
request = self.mock_rest_request()
response = nova.EditableQuotaSets().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response,
['cores', 'volumes', 'network'])
@mock.patch.object(nova.api, 'nova')
@mock.patch.object(nova.api, 'base')
@mock.patch.object(nova, 'quotas')
def test_quota_sets_patch(self, qc, bc, nc):
quota_data = dict(cores='15', instances='5',
ram='50000', metadata_items='150',
injected_files='5',
injected_file_content_bytes='10240',
floating_ips='50', fixed_ips='5',
security_groups='10',
security_group_rules='100')
request = self.mock_rest_request(body='''
{"cores": "15", "ram": "50000", "instances": "5",
"metadata_items": "150", "injected_files": "5",
"injected_file_content_bytes": "10240", "floating_ips": "50",
"fixed_ips": "5", "security_groups": "10" ,
"security_group_rules": "100", "volumes": "10"}
''')
qc.get_disabled_quotas.return_value = []
qc.NOVA_QUOTA_FIELDS = (n for n in quota_data)
bc.is_service_enabled.return_value = True
response = nova.QuotaSets().patch(request, 'spam123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content.decode('utf-8'), '')
nc.tenant_quota_update.assert_called_once_with(
request, 'spam123', **quota_data)
@mock.patch.object(nova.api, 'nova')
@mock.patch.object(nova.api, 'base')
@mock.patch.object(nova, 'quotas')
def test_quota_sets_patch_when_service_is_disabled(self, qc, bc, nc):
quota_data = dict(cores='15', instances='5',
ram='50000', metadata_items='150',
injected_files='5',
injected_file_content_bytes='10240',
floating_ips='50', fixed_ips='5',
security_groups='10',
security_group_rules='100')
request = self.mock_rest_request(body='''
{"cores": "15", "ram": "50000", "instances": "5",
"metadata_items": "150", "injected_files": "5",
"injected_file_content_bytes": "10240", "floating_ips": "50",
"fixed_ips": "5", "security_groups": "10" ,
"security_group_rules": "100", "volumes": "10"}
''')
qc.get_disabled_quotas.return_value = []
qc.NOVA_QUOTA_FIELDS = (n for n in quota_data)
bc.is_service_enabled.return_value = False
response = nova.QuotaSets().patch(request, 'spam123')
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'),
'"Service Nova is disabled."')
nc.tenant_quota_update.assert_not_called()
|
|
"""
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = [
'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
#
# Warnings and Exceptions
#
class RankWarning(UserWarning):
"""Issued by chebfit when the design matrix is rank deficient."""
pass
class PolyError(Exception):
"""Base class for errors in this module."""
pass
class PolyDomainError(PolyError):
"""Issued by the generic Poly class when two domains don't match.
This is raised when a binary operation is passed Poly objects with
different domains.
"""
pass
#
# Base class for all polynomial types
#
class PolyBase(object):
"""
Base class for all polynomial types.
Deprecated in numpy 1.9.0, use the abstract
ABCPolyBase class instead. Note that the latter
requires a number of virtual functions to be
implemented.
"""
pass
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq):
"""Remove small Poly series coefficients.
Parameters
----------
seq : sequence
Sequence of Poly series coefficients. This routine fails for
empty sequences.
Returns
-------
series : sequence
Subsequence with trailing zeros removed. If the resulting sequence
would be empty, return the first element. The returned sequence may
or may not be a view.
Notes
-----
Do not lose the type info if the sequence contains unknown objects.
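Examples
--------
>>> trimseq([0, 0, 3, 0, 0])
[0, 0, 3]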
"""
if len(seq) == 0:
return seq
else:
for i in range(len(seq) - 1, -1, -1):
if seq[i] != 0:
break
return seq[:i+1]
def as_series(alist, trim=True):
"""
Return argument as a list of 1-d arrays.
The returned list contains array(s) of dtype double, complex double, or
object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
raises a ValueError if it is not first reshaped into either a 1-d or 2-d
array.
Parameters
----------
alist : array_like
A 1- or 2-d array_like
trim : boolean, optional
When True, trailing zeros are removed from the inputs.
When False, the inputs are passed through intact.
Returns
-------
[a1, a2,...] : list of 1-D arrays
A copy of the input data as a list of 1-d arrays.
Raises
------
ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
Examples
--------
>>> from numpy import polynomial as P
>>> a = np.arange(4)
>>> P.as_series(a)
[array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])]
>>> b = np.arange(6).reshape((2,3))
>>> P.as_series(b)
[array([ 0., 1., 2.]), array([ 3., 4., 5.])]
"""
arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
if min([a.size for a in arrays]) == 0:
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]):
raise ValueError("Coefficient array is not 1-d")
if trim:
arrays = [trimseq(a) for a in arrays]
if any([a.dtype == np.dtype(object) for a in arrays]):
ret = []
for a in arrays:
if a.dtype != np.dtype(object):
tmp = np.empty(len(a), dtype=np.dtype(object))
tmp[:] = a[:]
ret.append(tmp)
else:
ret.append(a.copy())
else:
try:
dtype = np.common_type(*arrays)
except Exception:
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
return ret
def trimcoef(c, tol=0):
"""
Remove "small" "trailing" coefficients from a polynomial.
"Small" means "small in absolute value" and is controlled by the
parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
both the third- and fourth-order coefficients would be "trimmed."
Parameters
----------
c : array_like
1-d array of coefficients, ordered from lowest order to highest.
tol : number, optional
Trailing (i.e., highest order) elements with absolute value less
than or equal to `tol` (default value is zero) are removed.
Returns
-------
trimmed : ndarray
1-d array with trailing zeros removed. If the resulting series
would be empty, a series containing a single zero is returned.
Raises
------
ValueError
If `tol` < 0
See Also
--------
trimseq
Examples
--------
>>> from numpy import polynomial as P
>>> P.trimcoef((0,0,3,0,5,0,0))
array([ 0., 0., 3., 0., 5.])
>>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([ 0.])
>>> i = complex(0,1) # works for complex
>>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([ 0.0003+0.j , 0.0010-0.001j])
"""
if tol < 0:
raise ValueError("tol must be non-negative")
[c] = as_series([c])
[ind] = np.where(np.abs(c) > tol)
if len(ind) == 0:
return c[:1]*0
else:
return c[:ind[-1] + 1].copy()
def getdomain(x):
"""
Return a domain suitable for given abscissae.
Find a domain suitable for a polynomial or Chebyshev series
defined at the values supplied.
Parameters
----------
x : array_like
1-d array of abscissae whose domain will be determined.
Returns
-------
domain : ndarray
1-d array containing two values. If the inputs are complex, then
the two returned points are the lower left and upper right corners
of the smallest rectangle (aligned with the axes) in the complex
plane containing the points `x`. If the inputs are real, then the
two points are the ends of the smallest interval containing the
points `x`.
See Also
--------
mapparms, mapdomain
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> points = np.arange(4)**2 - 5; points
array([-5, -4, -1, 4])
>>> pu.getdomain(points)
array([-5., 4.])
>>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle
>>> pu.getdomain(c)
array([-1.-1.j, 1.+1.j])
"""
[x] = as_series([x], trim=False)
if x.dtype.char in np.typecodes['Complex']:
rmin, rmax = x.real.min(), x.real.max()
imin, imax = x.imag.min(), x.imag.max()
return np.array((complex(rmin, imin), complex(rmax, imax)))
else:
return np.array((x.min(), x.max()))
def mapparms(old, new):
"""
Linear map parameters between domains.
Return the parameters of the linear map ``offset + scale*x`` that maps
`old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.
Parameters
----------
old, new : array_like
Domains. Each domain must (successfully) convert to a 1-d array
containing precisely two values.
Returns
-------
offset, scale : scalars
The map ``L(x) = offset + scale*x`` maps the first domain to the
second.
See Also
--------
getdomain, mapdomain
Notes
-----
Also works for complex numbers, and thus can be used to calculate the
parameters required to map any line in the complex plane to any other
line therein.
Examples
--------
>>> from numpy import polynomial as P
>>> P.mapparms((-1,1),(-1,1))
(0.0, 1.0)
>>> P.mapparms((1,-1),(-1,1))
(0.0, -1.0)
>>> i = complex(0,1)
>>> P.mapparms((-i,-1),(1,i))
((1+1j), (1+0j))
"""
oldlen = old[1] - old[0]
newlen = new[1] - new[0]
off = (old[1]*new[0] - old[0]*new[1])/oldlen
scl = newlen/oldlen
return off, scl
def mapdomain(x, old, new):
"""
Apply linear map to input points.
The linear map ``offset + scale*x`` that maps the domain `old` to
the domain `new` is applied to the points `x`.
Parameters
----------
x : array_like
Points to be mapped. If `x` is a subtype of ndarray the subtype
will be preserved.
old, new : array_like
The two domains that determine the map. Each must (successfully)
convert to 1-d arrays containing precisely two values.
Returns
-------
x_out : ndarray
Array of points of the same shape as `x`, after application of the
linear map between the two domains.
See Also
--------
getdomain, mapparms
Notes
-----
Effectively, this implements:
.. math ::
x\\_out = new[0] + m(x - old[0])
where
.. math ::
m = \\frac{new[1]-new[0]}{old[1]-old[0]}
Examples
--------
>>> from numpy import polynomial as P
>>> old_domain = (-1,1)
>>> new_domain = (0,2*np.pi)
>>> x = np.linspace(-1,1,6); x
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
>>> x_out = P.mapdomain(x, old_domain, new_domain); x_out
array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825,
6.28318531])
>>> x - P.mapdomain(x_out, new_domain, old_domain)
array([ 0., 0., 0., 0., 0., 0.])
Also works for complex numbers (and thus can be used to map any line in
the complex plane to any other line therein).
>>> i = complex(0,1)
>>> old = (-1 - i, 1 + i)
>>> new = (-1 + i, 1 - i)
>>> z = np.linspace(old[0], old[1], 6); z
array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ])
>>> new_z = P.mapdomain(z, old, new); new_z
array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ])
"""
x = np.asanyarray(x)
off, scl = mapparms(old, new)
return off + scl*x
|
|
# This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
from unittest import mock
import pytest
import environ.compat
from environ import Env
from environ.compat import PYMEMCACHE_DRIVER, REDIS_DRIVER, ImproperlyConfigured
def test_base_options_parsing():
url = ('memcache://127.0.0.1:11211/?timeout=0&'
'key_prefix=cache_&key_function=foo.get_key&version=1')
url = Env.cache_url_config(url)
assert url['KEY_PREFIX'] == 'cache_'
assert url['KEY_FUNCTION'] == 'foo.get_key'
assert url['TIMEOUT'] == 0
assert url['VERSION'] == 1
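# Note that numeric query values are cast ('timeout=0' becomes TIMEOUT == 0)
# and the literal string 'None' is coerced to a real None below.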
url = 'redis://127.0.0.1:6379/?timeout=None'
url = Env.cache_url_config(url)
assert url['TIMEOUT'] is None
@pytest.mark.parametrize(
'url,backend,location',
[
('dbcache://my_cache_table',
'django.core.cache.backends.db.DatabaseCache', 'my_cache_table'),
('filecache:///var/tmp/django_cache',
'django.core.cache.backends.filebased.FileBasedCache',
'/var/tmp/django_cache'),
('filecache://C:/foo/bar',
'django.core.cache.backends.filebased.FileBasedCache', 'C:/foo/bar'),
('locmemcache://',
'django.core.cache.backends.locmem.LocMemCache', ''),
('locmemcache://unique-snowflake',
'django.core.cache.backends.locmem.LocMemCache', 'unique-snowflake'),
('dummycache://',
'django.core.cache.backends.dummy.DummyCache', ''),
('rediss://127.0.0.1:6379/1', REDIS_DRIVER,
'rediss://127.0.0.1:6379/1'),
('rediscache://:redispass@127.0.0.1:6379/0', REDIS_DRIVER,
'redis://:redispass@127.0.0.1:6379/0'),
('rediscache://host1:6379,host2:6379,host3:9999/1', REDIS_DRIVER,
['redis://host1:6379/1', 'redis://host2:6379/1',
'redis://host3:9999/1']),
('rediscache:///path/to/socket:1', 'django_redis.cache.RedisCache',
'unix:///path/to/socket:1'),
('memcache:///tmp/memcached.sock',
'django.core.cache.backends.memcached.MemcachedCache',
'unix:/tmp/memcached.sock'),
('memcache://172.19.26.240:11211,172.19.26.242:11212',
'django.core.cache.backends.memcached.MemcachedCache',
['172.19.26.240:11211', '172.19.26.242:11212']),
('memcache://127.0.0.1:11211',
'django.core.cache.backends.memcached.MemcachedCache',
'127.0.0.1:11211'),
('pymemcache://127.0.0.1:11211',
PYMEMCACHE_DRIVER,
'127.0.0.1:11211'),
('pymemcache://memcached:11211/?key_prefix=ci',
PYMEMCACHE_DRIVER,
'memcached:11211'),
],
ids=[
'dbcache',
'filecache',
'filecache_win',
'locmemcache_empty',
'locmemcache',
'dummycache',
'rediss',
'redis_with_password',
'redis_multiple',
'redis_socket',
'memcached_socket',
'memcached_multiple',
'memcached',
'pylibmccache',
'pylibmccache_trailing_slash',
],
)
def test_cache_parsing(url, backend, location):
url = Env.cache_url_config(url)
assert url['BACKEND'] == backend
assert url['LOCATION'] == location
@pytest.mark.parametrize('django_version', ((3, 2), (3, 1), None))
@pytest.mark.parametrize('pymemcache_installed', (True, False))
def test_pymemcache_compat(django_version, pymemcache_installed):
old = 'django.core.cache.backends.memcached.PyLibMCCache'
new = 'django.core.cache.backends.memcached.PyMemcacheCache'
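# Expected selection rule: Django < 3.2 always gets PyLibMCCache; on 3.2+
# (or when the version is unknown) PyMemcacheCache is chosen if pymemcache
# is importable, falling back to PyLibMCCache otherwise.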
with mock.patch.object(environ.compat, 'DJANGO_VERSION', django_version):
with mock.patch('environ.compat.find_loader') as mock_find_loader:
mock_find_loader.return_value = pymemcache_installed
driver = environ.compat.choose_pymemcache_driver()
if django_version and django_version < (3, 2):
assert driver == old
else:
assert driver == (new if pymemcache_installed else old)
def test_redis_parsing():
url = ('rediscache://127.0.0.1:6379/1?client_class='
'django_redis.client.DefaultClient&password=secret')
url = Env.cache_url_config(url)
assert url['BACKEND'] == REDIS_DRIVER
assert url['LOCATION'] == 'redis://127.0.0.1:6379/1'
assert url['OPTIONS'] == {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': 'secret',
}
def test_redis_socket_url():
url = 'redis://:redispass@/path/to/socket.sock?db=0'
url = Env.cache_url_config(url)
assert REDIS_DRIVER == url['BACKEND']
assert url['LOCATION'] == 'unix://:redispass@/path/to/socket.sock'
assert url['OPTIONS'] == {
'DB': 0
}
def test_options_parsing():
url = 'filecache:///var/tmp/django_cache?timeout=60&max_entries=1000&cull_frequency=0'
url = Env.cache_url_config(url)
assert url['BACKEND'] == 'django.core.cache.backends.filebased.FileBasedCache'
assert url['LOCATION'] == '/var/tmp/django_cache'
assert url['TIMEOUT'] == 60
assert url['OPTIONS'] == {
'MAX_ENTRIES': 1000,
'CULL_FREQUENCY': 0,
}
def test_custom_backend():
url = 'memcache://127.0.0.1:5400?foo=option&bars=9001'
backend = 'django_redis.cache.RedisCache'
url = Env.cache_url_config(url, backend)
assert url['BACKEND'] == backend
assert url['LOCATION'] == '127.0.0.1:5400'
assert url['OPTIONS'] == {
'FOO': 'option',
'BARS': 9001,
}
def test_unknown_backend():
url = 'unknown-scheme://127.0.0.1:1000'
with pytest.raises(ImproperlyConfigured) as excinfo:
Env.cache_url_config(url)
assert str(excinfo.value) == 'Invalid cache schema unknown-scheme'
def test_empty_url_is_mapped_to_empty_config():
assert Env.cache_url_config('') == {}
assert Env.cache_url_config(None) == {}
@pytest.mark.parametrize(
'chars',
['!', '$', '&', "'", '(', ')', '*', '+', ';', '=', '-', '.', '-v1.2']
)
def test_cache_url_password_using_sub_delims(monkeypatch, chars):
"""Ensure CACHE_URL passwords may contains some unsafe characters.
See: https://github.com/joke2k/django-environ/issues/200 for details."""
url = 'rediss://enigma:secret{}@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
result = env.cache_url_config(url)
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
url = 'rediss://enigma:sec{}ret@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
result = env.cache_url_config(url)
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
url = 'rediss://enigma:{}secret@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
result = env.cache_url_config(url)
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
@pytest.mark.parametrize(
'chars', ['%3A', '%2F', '%3F', '%23', '%5B', '%5D', '%40', '%2C']
)
def test_cache_url_password_using_gen_delims(monkeypatch, chars):
"""Ensure CACHE_URL passwords may contains %-encoded characters.
See: https://github.com/joke2k/django-environ/issues/200 for details."""
url = 'rediss://enigma:secret{}@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
url = 'rediss://enigma:sec{}ret@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
url = 'rediss://enigma:{}secret@ondigitalocean.com:25061/2'.format(chars)
monkeypatch.setenv('CACHE_URL', url)
env = Env()
result = env.cache()
assert result['BACKEND'] == 'django_redis.cache.RedisCache'
assert result['LOCATION'] == url
def test_cache_url_env_using_default():
env = Env(CACHE_URL=(str, "locmemcache://"))
result = env.cache()
assert result["BACKEND"] == "django.core.cache.backends.locmem.LocMemCache"
assert result["LOCATION"] == ""
|
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import sys
from acq4.devices.Device import TaskGui
from acq4.util.SequenceRunner import *
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
import numpy
from TaskTemplate import *
from acq4.util.debug import *
import sip
class MultiClampTaskGui(TaskGui):
#sigSequenceChanged = QtCore.Signal(object) ## defined upstream
def __init__(self, dev, taskRunner):
TaskGui.__init__(self, dev, taskRunner)
daqDev = self.dev.getDAQName()
self.daqUI = self.taskRunner.getDevice(daqDev)
self.traces = {} ## Stores traces from a sequence to allow average plotting
self.resetInpPlots = False ## Signals result handler to clear plots before adding a new one
self.currentCmdPlot = None
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.splitter_2.setStretchFactor(0, 0)
self.ui.splitter_2.setStretchFactor(1, 1)
self.ui.splitter.setStretchFactor(0, 3)
self.ui.splitter.setStretchFactor(1, 1)
self.stateGroup = WidgetGroup(self)
#self.ui.waveGeneratorWidget.setTimeScale(1e-3)
self.ui.waveGeneratorWidget.setMeta('x', units='s', siPrefix=True, dec=True, step=0.5, minStep=1e-6)
self.unitLabels = [self.ui.waveGeneratorLabel, self.ui.holdingCheck]
#self.modeSignalList = self.dev.listModeSignals()
self.mode = None
self.setMode('I=0')
self.ui.topPlotWidget.registerPlot(self.dev.name() + '.Input')
self.ui.topPlotWidget.setDownsampling(ds=True, auto=True, mode='peak')
self.ui.topPlotWidget.setClipToView(True)
self.ui.bottomPlotWidget.registerPlot(self.dev.name() + '.Command')
self.ui.bottomPlotWidget.setDownsampling(ds=True, auto=True, mode='peak')
self.ui.bottomPlotWidget.setClipToView(True)
self.daqChanged(self.daqUI.currentState())
self.daqUI.sigChanged.connect(self.daqChanged)
self.ui.waveGeneratorWidget.sigDataChanged.connect(self.updateWaves)
self.ui.waveGeneratorWidget.sigParametersChanged.connect(self.sequenceChanged)
self.stateGroup.sigChanged.connect(self.uiStateChanged)
self.dev.sigStateChanged.connect(self.devStateChanged)
self.devStateChanged()
def uiStateChanged(self, name, value):
if 'ModeRadio' in name:
self.setMode()
#i0Checks = [self.ui.holdingCheck, self.ui.primaryGainCheck, self.ui.secondaryGainCheck]
if self.getMode() == 'I=0':
self.ui.holdingCheck.setChecked(False)
self.ui.holdingCheck.setEnabled(False)
#for c in i0Checks:
#c.setChecked(False)
#c.setEnabled(False)
else:
self.ui.holdingCheck.setEnabled(True)
#for c in i0Checks:
#c.setEnabled(True)
checkMap = {
'holdingCheck': self.ui.holdingSpin,
'primarySignalCheck': self.ui.primarySignalCombo,
'secondarySignalCheck': self.ui.secondarySignalCombo,
'primaryGainCheck': self.ui.primaryGainSpin,
'secondaryGainCheck': self.ui.secondaryGainSpin,
}
## For each check box, enable its corresponding control
if name in checkMap:
checkMap[name].setEnabled(value)
self.devStateChanged()
def devStateChanged(self, state=None):
mode = self.getMode()
state = self.dev.getLastState(mode)
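## Controls whose check boxes are unchecked (and are therefore disabled)
## simply mirror the device's last known state for this mode.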
if not self.ui.holdingSpin.isEnabled():
self.ui.holdingSpin.setValue(state['holding'])
if not self.ui.primaryGainSpin.isEnabled():
self.ui.primaryGainSpin.setValue(state['primaryGain'])
if not self.ui.secondaryGainSpin.isEnabled():
self.ui.secondaryGainSpin.setValue(state['secondaryGain'])
psig = ssig = None
if not self.ui.primarySignalCombo.isEnabled():
psig = state['primarySignal']
if not self.ui.secondarySignalCombo.isEnabled():
ssig = state['secondarySignal']
self.setSignals(psig, ssig)
def saveState(self):
state = self.stateGroup.state().copy()
state['mode'] = self.getMode()
state['primarySignal'] = str(self.ui.primarySignalCombo.currentText())
state['secondarySignal'] = str(self.ui.secondarySignalCombo.currentText())
return state
def restoreState(self, state):
try:
self.setMode(state['mode'])
if 'primarySignal' in state and 'secondarySignal' in state:
self.setSignals(state['primarySignal'], state['secondarySignal'])
self.stateGroup.setState(state)
except:
printExc('Error while restoring MultiClamp task GUI state:')
#self.ui.waveGeneratorWidget.update() ## should be called as a result of stateGroup.setState; don't need to call again
def daqChanged(self, state):
self.rate = state['rate']
self.numPts = state['numPts']
self.timeVals = numpy.linspace(0, float(self.numPts)/self.rate, self.numPts)
self.updateWaves()
def listSequence(self):
return self.ui.waveGeneratorWidget.listSequences()
def sequenceChanged(self):
#self.emit(QtCore.SIGNAL('sequenceChanged'), self.dev.name)
self.sigSequenceChanged.emit(self.dev.name())
def updateWaves(self):
self.clearCmdPlots()
## display sequence waves
params = {}
ps = self.ui.waveGeneratorWidget.listSequences()
for k in ps:
params[k] = range(len(ps[k]))
waves = []
runSequence(lambda p: waves.append(self.getSingleWave(p)), params, params.keys())
for w in waves:
if w is not None:
#self.plotCmdWave(w / self.cmdScale, color=QtGui.QColor(100, 100, 100), replot=False)
self.plotCmdWave(w, color=QtGui.QColor(100, 100, 100), replot=False)
## display single-mode wave in red
single = self.getSingleWave()
if single is not None:
#self.plotCmdWave(single / self.cmdScale, color=QtGui.QColor(200, 100, 100))
p = self.plotCmdWave(single, color=QtGui.QColor(200, 100, 100))
p.setZValue(1000)
#self.paramListChanged
def clearCmdPlots(self):
self.ui.bottomPlotWidget.clear()
self.currentCmdPlot = None
def taskSequenceStarted(self):
self.resetInpPlots = True
def clearInpPlots(self):
self.traces = {}
self.ui.topPlotWidget.clear()
def taskStarted(self, params):
## Draw green trace for current command waveform
if self.currentCmdPlot is not None:
self.ui.bottomPlotWidget.removeItem(self.currentCmdPlot)
params = dict([(p[1], params[p]) for p in params if p[0] == self.dev.name()])
cur = self.getSingleWave(params)
if cur is not None:
self.currentCmdPlot = self.plotCmdWave(cur, color=QtGui.QColor(100, 200, 100))
self.currentCmdPlot.setZValue(1001)
def plotCmdWave(self, data, color=QtGui.QColor(100, 100, 100), replot=True):
if data is None:
return
plot = self.ui.bottomPlotWidget.plot(data, x=self.timeVals)
plot.setPen(QtGui.QPen(color))
return plot
def generateTask(self, params=None):
state = self.stateGroup.state()
if params is None:
params = {}
task = {}
mode = self.getMode()
task['mode'] = mode
task['recordState'] = True
#if self.ui.primarySignalCheck.isChecked():
#task['primary'] = self.ui.primarySignalCombo.currentText()
#if self.ui.secondarySignalCheck.isChecked():
#task['secondary'] = self.ui.secondarySignalCombo.currentText()
if state['primarySignalCheck']:
task['primarySignal'] = state['primarySignalCombo']
if state['secondarySignalCheck']:
task['secondarySignal'] = state['secondarySignalCombo']
if state['primaryGainCheck']:
task['primaryGain'] = state['primaryGainSpin']
if state['secondaryGainCheck']:
task['secondaryGain'] = state['secondaryGainSpin']
if mode != 'I=0':
## Must scale command to V or A before sending to task system.
wave = self.getSingleWave(params)
if wave is not None:
task['command'] = wave
if state['holdingCheck']:
task['holding'] = state['holdingSpin']
#print "Task:", task
return task
def getSingleWave(self, params=None):
state = self.stateGroup.state()
h = state['holdingSpin']
#if state['holdingCheck']:
#h = state['holdingSpin']
#else:
#h = 0.0
self.ui.waveGeneratorWidget.setOffset(h)
#self.ui.waveGeneratorWidget.setScale(self.cmdScale)
## waveGenerator generates values in V or A
wave = self.ui.waveGeneratorWidget.getSingle(self.rate, self.numPts, params)
if wave is None:
return None
#if state['holdingCheck']:
#wave += (state['holdingSpin'] / self.cmdScale)
return wave
def getMode(self):
if self.ui.icModeRadio.isChecked():
self.mode = 'IC'
elif self.ui.i0ModeRadio.isChecked():
self.mode = 'I=0'
else:
self.mode = 'VC'
return self.mode
def setMode(self, mode=None):
if mode != self.mode:
oldMode = self.mode
if mode is None:
mode = self.getMode()
#print "Set mode to", mode
# set radio button
if mode == 'IC':
self.ui.icModeRadio.setChecked(True)
elif mode == 'I=0':
self.ui.i0ModeRadio.setChecked(True)
else:
self.ui.vcModeRadio.setChecked(True)
# update signal lists
self.stateGroup.blockSignals(True)
sigs = self.dev.listSignals(mode)
#print "Signals:", sigs
#print "-------"
for s, c in [(sigs[0], self.ui.primarySignalCombo),(sigs[1], self.ui.secondarySignalCombo)]:
c.clear()
for ss in s:
c.addItem(ss)
self.stateGroup.blockSignals(False)
#self.ui.primarySignalCombo.clear()
#for s in self.modeSignalList['primary'][mode]:
#self.ui.primarySignalCombo.addItem(s)
#self.ui.secondarySignalCombo.clear()
#for s in self.modeSignalList['secondary'][mode]:
#self.ui.secondarySignalCombo.addItem(s)
# Disable signal, holding, and gain checks (only when switching between v and i modes)
if mode == 'VC' or oldMode == 'VC':
self.ui.primarySignalCheck.setChecked(False)
self.ui.secondarySignalCheck.setChecked(False)
self.ui.holdingCheck.setChecked(False)
self.ui.holdingSpin.setValue(0.0)
self.ui.primaryGainCheck.setChecked(False)
self.ui.secondaryGainCheck.setChecked(False)
# update unit labels and scaling
if mode == 'VC':
newUnit = 'V'
oldUnit = 'A'
#self.cmdScale = 1e-3
#self.inpScale = 1e-12
spinOpts = dict(suffix='V', siPrefix=True, dec=True, step=0.5, minStep=1e-3)
self.ui.waveGeneratorWidget.setMeta('y', **spinOpts)
self.ui.waveGeneratorWidget.setMeta('xy', units='V*s', siPrefix=True, dec=True, step=0.5, minStep=1e-6)
else:
newUnit = 'A'
oldUnit = 'V'
#self.cmdScale = 1e-12
#self.inpScale = 1e-3
spinOpts = dict(suffix='A', siPrefix=True, dec=True, step=0.5, minStep=1e-12)
self.ui.waveGeneratorWidget.setMeta('y', **spinOpts)
self.ui.waveGeneratorWidget.setMeta('xy', units='C', siPrefix=True, dec=True, step=0.5, minStep=1e-15)
#self.stateGroup.setScale(self.ui.holdingSpin, 1./self.cmdScale)
self.ui.holdingSpin.setOpts(**spinOpts)
#self.ui.waveGeneratorWidget.setScale(self.cmdScale)
for l in self.unitLabels:
text = str(l.text())
l.setText(text.replace(oldUnit, newUnit))
self.ui.topPlotWidget.setLabel('left', units=oldUnit)
self.ui.bottomPlotWidget.setLabel('left', units=newUnit)
## Hide stim plot for I=0 mode
if mode == 'I=0':
self.ui.bottomPlotWidget.hide()
else:
self.ui.bottomPlotWidget.show()
self.devStateChanged()
self.mode = mode
def setSignals(self, pri, sec):
#print "setSignals", pri, sec
for c, s in [(self.ui.primarySignalCombo, pri), (self.ui.secondarySignalCombo, sec)]:
if s is None:
continue
ind = c.findText(s)
if ind == -1:
for i in range(c.count()):
print(c.itemText(i))
raise Exception('Signal "%s" does not exist' % s)
c.setCurrentIndex(ind)
def handleResult(self, result, params):
if self.resetInpPlots:
self.resetInpPlots = False
self.clearInpPlots()
## Plot the results
#plot = self.ui.topPlotWidget.plot(result['primary'].view(numpy.ndarray) / self.inpScale, x=result.xvals('Time'), params=params)
plot = self.ui.topPlotWidget.plot(result['primary'].view(numpy.ndarray), x=result.xvals('Time'), params=params)
def quit(self):
TaskGui.quit(self)
if not sip.isdeleted(self.daqUI):
self.daqUI.sigChanged.disconnect(self.daqChanged)  ## disconnect the new-style signal connected in __init__
self.ui.topPlotWidget.close()
self.ui.bottomPlotWidget.close()
|
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.webapi.resources import resources
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
review_group_user_item_mimetype, review_group_user_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_review_group_user_item_url,
get_review_group_user_list_url,
get_user_item_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(BaseWebAPITestCase):
"""Testing the ReviewGroupUserResource list API tests."""
fixtures = ['test_users']
sample_api_url = 'groups/<name>/users/'
resource = resources.review_group_user
basic_post_use_admin = True
def compare_item(self, item_rsp, user):
self.assertEqual(item_rsp['id'], user.pk)
self.assertEqual(item_rsp['username'], user.username)
self.assertEqual(item_rsp['first_name'], user.first_name)
self.assertEqual(item_rsp['last_name'], user.last_name)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
group = self.create_review_group(with_local_site=with_local_site)
if populate_items:
items = [
User.objects.get(username='doc'),
User.objects.get(username='grumpy'),
]
group.users = items
else:
items = []
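# This (url, mimetype, items) triple is consumed by the
# BasicTestsMetaclass-generated GET tests, which appear to fetch the URL
# and compare each returned entry via compare_item().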
return (get_review_group_user_list_url(group.name, local_site_name),
review_group_user_list_mimetype,
items)
@webapi_test_template
def test_get_with_no_access(self):
"""Testing the GET <URL> API without access to invite-only group"""
group = self.create_review_group(name='priv-group', invite_only=True)
rsp = self.api_get(get_review_group_user_list_url(group.name),
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_get_multiple_groups(self):
"""Testing GET <URL> API with a user in multiple groups"""
doc = User.objects.get(username='doc')
groups = [
self.create_review_group('group1'),
self.create_review_group('group2'),
]
for group in groups:
group.users.add(doc)
rsp = self.api_get(
get_review_group_user_list_url(groups[0].name),
expected_mimetype=review_group_user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['total_results'], 1)
self.compare_item(rsp['users'][0], doc)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
group = self.create_review_group(with_local_site=with_local_site)
if post_valid_data:
post_data = {
'username': 'doc',
}
else:
post_data = {}
return (get_review_group_user_list_url(group.name, local_site_name),
review_group_user_item_mimetype,
post_data,
[group])
def check_post_result(self, user, rsp, group):
users = list(group.users.all())
self.assertEqual(len(users), 1)
self.assertEqual(users[0].username, 'doc')
self.compare_item(rsp['user'], users[0])
@webapi_test_template
def test_post_with_no_access(self, local_site=None):
"""Testing the POST <URL> API with Permission Denied"""
group = self.create_review_group()
user = User.objects.get(pk=1)
rsp = self.api_post(
get_review_group_user_list_url(group.name, local_site),
{'username': user.username},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
@webapi_test_template
def test_post_with_invalid_user(self):
"""Testing the POST <URL> API with invalid user"""
self._login_user(admin=True)
group = self.create_review_group()
rsp = self.api_post(
get_review_group_user_list_url(group.name),
{'username': 'grabl'},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_USER.code)
self.assertEqual(group.users.count(), 0)
@webapi_test_template
def test_post_with_self(self):
"""Testing the POST <URL> API with the requesting user"""
group = self.create_review_group()
self.assertFalse(self.user.is_superuser)
rsp = self.api_post(
get_review_group_user_list_url(group.name),
{'username': self.user.username},
expected_mimetype=review_group_user_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(group.users.count(), 1)
@webapi_test_template
def test_post_with_self_and_private_group(self):
"""Testing the POST <URL> API with the requesting user and private
group
"""
group = self.create_review_group(invite_only=True)
self.assertFalse(group.is_accessible_by(self.user))
rsp = self.api_post(
get_review_group_user_list_url(group.name),
{'username': self.user.username},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(group.users.count(), 0)
@add_fixtures(['test_site'])
@webapi_test_template
def test_post_with_self_and_site(self):
"""Testing the POST <URL> API with the requesting user on a local site
"""
self.assertFalse(self.user.is_superuser)
local_site = self.get_local_site(name=self.local_site_name)
local_site.users.add(self.user)
group = self.create_review_group(with_local_site=True)
self.assertEqual(group.users.count(), 0)
rsp = self.api_post(
get_review_group_user_list_url(group.name, self.local_site_name),
{'username': self.user.username},
expected_mimetype=review_group_user_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(group.users.count(), 1)
@add_fixtures(['test_site'])
@webapi_test_template
def test_post_with_self_and_unjoined_site(self):
"""Testing the POST <URL> API with the requesting user on an unjoined
local site
"""
self.assertFalse(self.user.is_superuser)
group = self.create_review_group(with_local_site=True)
self.assertEqual(group.users.count(), 0)
rsp = self.api_post(
get_review_group_user_list_url(group.name, self.local_site_name),
{'username': self.user.username},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(group.users.count(), 0)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(BaseWebAPITestCase):
"""Testing the ReviewGroupUserResource item API tests."""
fixtures = ['test_users']
sample_api_url = 'groups/<name>/users/<username>/'
resource = resources.review_group_user
basic_delete_use_admin = True
basic_put_use_admin = True
def setup_http_not_allowed_item_test(self, user):
return get_review_group_user_list_url('my-group')
def compare_item(self, item_rsp, user):
self.assertEqual(item_rsp['id'], user.pk)
self.assertEqual(item_rsp['username'], user.username)
self.assertEqual(item_rsp['first_name'], user.first_name)
self.assertEqual(item_rsp['last_name'], user.last_name)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
group = self.create_review_group(with_local_site=with_local_site)
doc = User.objects.get(username='doc')
group.users.add(doc)
return (get_review_group_user_item_url(group.name, doc.username,
local_site_name),
[group, doc])
def check_delete_result(self, user, group, doc):
self.assertNotIn(doc, group.users.all())
@webapi_test_template
def test_delete_with_self(self):
"""Testing the DELETE <URL> API with the requesting user
"""
group = self.create_review_group()
group.users.add(self.user)
self.assertFalse(self.user.is_superuser)
self.api_delete(
get_review_group_user_item_url(group.name, self.user.username))
self.assertEqual(group.users.count(), 0)
@add_fixtures(['test_site'])
@webapi_test_template
def test_delete_with_self_with_site(self):
"""Testing the DELETE <URL> API with the requesting user on local site
"""
self.assertFalse(self.user.is_superuser)
local_site = self.get_local_site(name=self.local_site_name)
local_site.users.add(self.user)
group = self.create_review_group(with_local_site=True)
group.users.add(self.user)
self.assertEqual(group.users.count(), 1)
self.api_delete(
get_review_group_user_item_url(group.name, self.user.username,
self.local_site_name))
self.assertEqual(group.users.count(), 0)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
group = self.create_review_group(with_local_site=with_local_site)
doc = User.objects.get(username='doc')
group.users.add(doc)
return (get_review_group_user_item_url(group.name, doc.username,
local_site_name),
review_group_user_item_mimetype,
doc)
@webapi_test_template
def test_get_delete_link(self):
"""Testing GET <URL> API contains the correct DELETE link"""
doc = User.objects.get(username='doc')
group = self.create_review_group()
group.users.add(doc)
rsp = self.api_get(
get_review_group_user_item_url(group.name, doc.username),
expected_mimetype=review_group_user_item_mimetype)
delete_href = \
rsp['user']['links']['delete']['href'][len(self.base_url):]
self.assertEqual(
delete_href,
get_review_group_user_item_url(group.name, doc.username))
self.assertNotEqual(delete_href, get_user_item_url(doc.username))
@add_fixtures(['test_site'])
@webapi_test_template
def test_get_delete_link_local_site(self):
"""Testing GET <URL> API contains the correct DELETE link with a local
site
"""
doc = User.objects.get(username='doc')
local_site = self.get_local_site(name=self.local_site_name)
local_site.users.add(self.user)
local_site.users.add(doc)
group = self.create_review_group(local_site=local_site)
group.users.add(doc)
rsp = self.api_get(
get_review_group_user_item_url(group.name, doc.username,
local_site.name),
expected_mimetype=review_group_user_item_mimetype)
delete_href = \
rsp['user']['links']['delete']['href'][len(self.base_url):]
self.assertEqual(
delete_href,
get_review_group_user_item_url(group.name, doc.username,
local_site.name))
self.assertNotEqual(delete_href, get_user_item_url(doc.username,
local_site.name))
|
|
from typing import Any, List, Dict, Optional, Text
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect, render
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range
from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmDomain, \
RealmFilter, PreregistrationUser, UserActivity, \
UserPresence, get_recipient, name_changes_disabled, email_to_username, \
get_realm_domains
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
do_update_pointer, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.utils import statsd, get_subdomain
import calendar
import datetime
import logging
import os
import re
import simplejson
import time
@zulip_login_required
def accounts_accept_terms(request):
# type: (HttpRequest) -> HttpResponse
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
return render(
request,
'zerver/accounts_accept_terms.html',
context={'form': form,
'email': email,
'special_message_template': special_message_template},
)
def sent_time_in_epoch_seconds(user_message):
# type: (Optional[UserMessage]) -> Optional[float]
if user_message is None:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
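# For example, calendar.timegm(datetime.datetime(1970, 1, 1).utctimetuple())
# is 0: timegm interprets the tuple as UTC, unlike time.mktime, which would
# apply the server's local timezone.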
return calendar.timegm(user_message.message.pub_date.utctimetuple())
def home(request):
# type: (HttpRequest) -> HttpResponse
if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
response = render(request, 'zerver/handlebars_compilation_failed.html')
response.status_code = 500
return response
if not settings.ROOT_DOMAIN_LANDING_PAGE:
return home_real(request)
# If settings.ROOT_DOMAIN_LANDING_PAGE, send the user to the landing
# page, not the login form, on the root domain
subdomain = get_subdomain(request)
if subdomain != "":
return home_real(request)
return render(request, 'zerver/hello.html')
@zulip_login_required
def home_real(request):
# type: (HttpRequest) -> HttpResponse
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
# If a user hasn't signed the current Terms of Service, send them there
if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
return accounts_accept_terms(request)
narrow = [] # type: List[List[Text]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get narrowed to PM with welcome-bot
needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
# Set default language and make it persist
default_language = register_ret['default_language']
url_lang = '/{}'.format(request.LANGUAGE_CODE)
if not request.path.startswith(url_lang):
translation.activate(default_language)
request.session[translation.LANGUAGE_SESSION_KEY] = default_language
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
# Server settings.
development_environment = settings.DEVELOPMENT,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
root_domain_uri = settings.ROOT_DOMAIN_URI,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
max_avatar_file_size = settings.MAX_AVATAR_FILE_SIZE,
server_generation = settings.SERVER_GENERATION,
use_websockets = settings.USE_WEBSOCKETS,
save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
server_inline_image_preview = settings.INLINE_IMAGE_PREVIEW,
server_inline_url_embed_preview = settings.INLINE_URL_EMBED_PREVIEW,
password_min_length = settings.PASSWORD_MIN_LENGTH,
password_min_quality = settings.PASSWORD_MIN_ZXCVBN_QUALITY,
# Misc. extra data.
have_initial_messages = user_has_messages,
initial_servertime = time.time(), # Used for calculating relative presence age
default_language_name = get_language_name(register_ret['default_language']),
language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
language_list = get_language_list(),
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
)
undesired_register_ret_fields = [
'streams',
]
for field_name in set(register_ret.keys()) - set(undesired_register_ret_fields):
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
page_params["enable_desktop_notifications"] = False
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render(request, 'zerver/index.html',
context={'user_profile': user_profile,
'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE_ENABLED,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.webathena_enabled,
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
},)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponseRedirect(reverse('zerver.views.home.home'))
def apps_view(request, _):
# type: (HttpRequest, Text) -> HttpResponse
if settings.ZILENCER_ENABLED:
return render(request, 'zerver/apps.html')
return HttpResponseRedirect('https://zulipchat.com/apps/', status=301)
def is_buggy_ua(agent):
# type: (str) -> bool
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
|
|
from bw2io.package import BW2Package
from lcopt.model import LcoptModel, unnormalise_unit
from lcopt.interact import FlaskSandbox
from copy import deepcopy
from collections import OrderedDict
from warnings import warn
#import networkx as nx
def validate_imported_model(model):
db = model.database['items']
ecoinvent_name = model.ecoinventName
ecoinvent_items = [x['items'] for x in model.external_databases if x['name'] == ecoinvent_name][0]
ecoinvent_links = []
for key, item in db.items():
if item.get('ext_link'):
if item['ext_link'][0] == ecoinvent_name:
ecoinvent_links.append(item['ext_link'])
for link in ecoinvent_links:
if not ecoinvent_items.get(link):
warn("{} not found in ecoinvent 3.3 cutoff database".format(link))
return False
return True
def get_sandbox_root(links):
froms = []
tos = []
for l in links:
froms.append(l['sourceID'])
tos.append(l['targetID'])
fset = set(froms)
tset = set(tos)
roots = [x for x in tset if x not in fset]
#print(sorted(fset))
#print(sorted(tset))
if len(roots) == 1:
return roots[0]
else:
print('Multiple roots found!')
return False
def get_sandbox_neighbours(sandbox_links, root):
neighbours = []
for x in sandbox_links:
if x['targetID'] == root:
neighbours.append(x['sourceID'])
return neighbours
def hierarchy_pos(links, root, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5, pos=None, parent=None, min_dx=0.03):
'''If there is a cycle reachable from root, this will recurse infinitely.
links: the sandbox links (list of dicts with 'sourceID' and 'targetID' keys)
root: the root node of current branch
width: horizontal space allocated for this branch - avoids overlap with other branches
vert_gap: gap between levels of hierarchy
vert_loc: vertical location of root
xcenter: horizontal location of root
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch.'''
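# Illustrative sketch (hypothetical IDs): for links
# [{'sourceID': 'b', 'targetID': 'a'}, {'sourceID': 'c', 'targetID': 'a'}],
# get_sandbox_root returns 'a', and this function places 'a' at (0.5, 0)
# with 'b' at (0.25, -0.2) and 'c' at (0.75, -0.2).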
if pos is None:
pos = {root: (xcenter, vert_loc)}
else:
pos[root] = (xcenter, vert_loc)
neighbors = get_sandbox_neighbours(links, root)
if len(neighbors) != 0:
dx = max(width / len(neighbors), min_dx)
#nextx = xcenter - width / 2 - dx / 2
nextx = pos[root][0] - (len(neighbors) - 1) * dx / 2 - dx
for neighbor in neighbors:
nextx += dx
pos = hierarchy_pos(links, neighbor, width=dx, vert_gap=vert_gap,
vert_loc=vert_loc - vert_gap, xcenter=nextx, pos=pos,
parent=root)
return pos
def compute_layout(fs):
#nx_nodes = []
#n = deepcopy(nodes)
#for x in n:
# i = x.pop('id')
# nx_nodes.append((i, x))
#nx_links = []
#l = deepcopy(links)
#for x in l:
# from_id = x.pop('sourceID')
# to_id = x.pop('targetID')
# nx_links.append((from_id, to_id, x))
#G = nx.Graph()
#G.add_nodes_from(nx_nodes)
#G.add_edges_from(nx_links)
nodes = fs.nodes
links = fs.links
pos = hierarchy_pos(links, get_sandbox_root(links))
pos90 = {k: (v[1], -v[0]) for k, v in pos.items()}
xs = [v[0] for k, v in pos90.items()]
ys = [v[1] for k, v in pos90.items()]
s_xs = [(x - min(xs)) for x in xs]
s_ys = [(y - min(ys)) for y in ys]
row = 50
col = 300
max_height = 1000
max_width = 1100
height = min([max_height, len(set(ys)) * row])
width = min([max_width, len(set(xs)) * col])
pad_top = 20
pad_left = 20
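# Normalise the rotated positions to [0, 1] per axis, then scale to the
# pixel canvas and offset by the padding.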
pos_scaled = {k: ((v[0] - min(xs)) / max(s_xs) * width + pad_left, (v[1] - min(ys)) / max(s_ys) * height + pad_top) for k, v in pos90.items()}
sandbox = {k: {'x': v[0], 'y': v[1]} for k, v in pos_scaled.items()}
processes = [k for k, v in fs.reverse_process_output_map.items()]
process_fudge_factor = 10 # process boxes are (generally) 20px taller than inputs, so if we shift these up 10 pixels it looks nicer...
for k, v in sandbox.items():
if k in processes:
sandbox[k]['y'] -= process_fudge_factor
return sandbox
def create_LcoptModel_from_BW2Package(import_filename, autosetup=True):
import_data = BW2Package.load_file(import_filename)
orig_db = import_data[0]['data']
db_name = import_data[0]['name']
model = LcoptModel(db_name, autosetup=autosetup)
db = deepcopy(orig_db)
temp_param_set = []
temp_production_param_set = []
for k, v in db.items():
exchanges = []
production_amount = v.get('production amount', 1)
if production_amount != 1:
print("NOTE: Production amount for {} is not 1 unit ({})".format(v['name'], production_amount, production_amount))
temp_production_param_set.append({'of': v['name'], 'amount': production_amount})
"""p_exs = [e for e in v['exchanges'] if e['type'] == 'production']
t_exs = [e for e in v['exchanges'] if e['type'] == 'technosphere']
if len(p_exs) == 0:
print(v['name'] + " has no production exchange")
if len(p_exs) == 0 and len(t_exs) == 1:
temp_tech_exc = deepcopy(t_exs[0])
exc_name = temp_tech_exc.pop('name')
exc_input = temp_tech_exc.pop('input')
exc_unit = unnormalise_unit(temp_tech_exc.pop('unit'))
exc_type = 'production'
this_exc = {
'name': exc_name,
'type': exc_type,
'unit': exc_unit,
'amount': 1,
'lcopt_type': 'intermediate',
}
exchanges.append(this_exc)"""
for e in v['exchanges']:
exc_name = e.pop('name')
exc_input = e.pop('input')
exc_unit = unnormalise_unit(e.pop('unit'))
exc_amount = e.pop('amount')
exc_type = e.pop('type')
temp_param_set.append({'from': exc_name, 'to': v['name'], 'amount': exc_amount})
if e.get('location'):
e.pop('location')
if exc_type == 'production':
this_exc = {
'name': exc_name,
'type': exc_type,
'unit': exc_unit,
'lcopt_type': 'intermediate',
}
this_exc = {**this_exc, **e}
exchanges.append(this_exc)
elif exc_type == 'technosphere':
this_exc = {
'name': exc_name,
'type': exc_type,
'unit': exc_unit,
}
exc_db = exc_input[0]
if exc_db == db_name:
this_exc['lcopt_type'] = 'intermediate'
else:
this_exc['ext_link'] = ('Ecoinvent3_3_cutoff', exc_input[1])
this_exc['lcopt_type'] = 'input'
this_exc = {**this_exc, **e}
exchanges.append(this_exc)
elif exc_type == 'biosphere':
this_exc = {
'name': exc_name,
'type': 'technosphere',
'unit': exc_unit,
}
this_exc['ext_link'] = exc_input
this_exc['lcopt_type'] = 'biosphere'
this_exc = {**this_exc, **e}
exchanges.append(this_exc)
model.create_process(v['name'], exchanges)
param_set = OrderedDict()
for p in temp_param_set:
exc_from = model.names.index(p['from'])
exc_to = model.names.index(p['to'])
if exc_from != exc_to:
parameter_id = "p_{}_{}".format(exc_from, exc_to)
param_set[parameter_id] = p['amount']
for p in temp_production_param_set:
exc_of = model.names.index(p['of'])
parameter_id = "p_{}_production".format(exc_of)
param_set[parameter_id] = p['amount']
model.parameter_sets[db_name] = param_set
model.parameter_scan()
fs = FlaskSandbox(model)
model.sandbox_positions = compute_layout(fs)
if validate_imported_model(model):
print('\nModel created successfully')
return model
else:
print('\nModel not valid - check the ecoinvent version in brightway2')
return None
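# A hedged usage sketch: round-trip a BW2Package export into an LcoptModel.
# 'my_model.bw2package' is a placeholder path; validation needs the matching
# ecoinvent background database set up in brightway2.
def _example_import_bw2package():  # pragma: no cover - example only
    model = create_LcoptModel_from_BW2Package('my_model.bw2package')
    if model is None:
        raise RuntimeError('import failed validation - check brightway2 setup')
    return model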
|
|
"""Tests for pickaxe.py using pytest."""
from pathlib import Path
import pytest
from minedatabase.filters import (
AtomicCompositionFilter,
MCSFilter,
MetabolomicsFilter,
MWFilter,
SimilarityFilter,
SimilaritySamplingFilter,
)
file_path = Path(__file__)
file_dir = file_path.parent
DATA_DIR = (file_dir / "../data/").resolve()
# Check whether eQuilibrator and a local thermodynamics DB are available.
loaded_db = False
try:
    from equilibrator_api import Q_
    from minedatabase.filters.thermodynamics import ThermoFilter
    from minedatabase.thermodynamics import Thermodynamics
    thermo = Thermodynamics()
    try:
        thermo.load_thermo_from_postgres()
        loaded_db = True
    except Exception:
        try:
            thermo.load_thermo_from_sqlite()
            loaded_db = True
        except Exception:
            pass
except Exception:
    pass
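# The pk_target fixture used below comes from conftest.py (not shown here).
# As a rough, hypothetical sketch of what these tests assume it provides: a
# Pickaxe instance with a compound set and target SMILES loaded, e.g.
#     @pytest.fixture
#     def pk_target():
#         pk = Pickaxe(rule_list=..., coreactant_list=...)
#         pk.load_compound_set(...)
#         pk.load_targets(...)
#         return pk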
def test_similarity_cutoff_single(pk_target):
"""Test similarity cutoff filter"""
tani_threshold = 0.5
_filter = SimilarityFilter(
crit_similarity=tani_threshold, increasing_similarity=False
)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 355
assert (
pk_target.compounds["C779bfa0d747509f0499664b390657a336edec104"]["Expand"]
== True
)
@pytest.mark.skip("Heisenbug Test")
def test_filter_after(pk_target):
"""Test similarity cutoff filter"""
tani_threshold = 0.5
_filter = SimilarityFilter(
crit_similarity=tani_threshold, increasing_similarity=False
)
pk_target.filter_after_final_gen = True
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 257
assert (
pk_target.compounds["C779bfa0d747509f0499664b390657a336edec104"]["Expand"]
== False
)
def test_similarity_cutoff_multi(pk_target):
"""Test similarity cutoff filter"""
tani_threshold = [0, 0.3, 0.5]
_filter = SimilarityFilter(
crit_similarity=tani_threshold, increasing_similarity=False
)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 1094
assert (
pk_target.compounds["C779bfa0d747509f0499664b390657a336edec104"]["Expand"]
== True
)
def test_similarity_cutoff_multi_short_list(pk_target):
"""Test similarity filter when the tani_threshold is shorter than generations."""
tani_threshold = [0.5]
_filter = SimilarityFilter(
crit_similarity=tani_threshold, increasing_similarity=False
)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 355
assert (
pk_target.compounds["C779bfa0d747509f0499664b390657a336edec104"]["Expand"]
== True
)
def test_similarity_no_targets(pk_target):
    """Test similarity filter behavior when no targets are given."""
    pk_target.target_smiles = []
tani_threshold = 0.5
_filter = SimilarityFilter(
crit_similarity=tani_threshold, increasing_similarity=False
)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 1348
assert (
pk_target.compounds["C779bfa0d747509f0499664b390657a336edec104"]["Expand"]
== True
)
def test_similarity_sample_default_weight(pk_target):
"""Test similarity cutoff filter"""
_filter = SimilaritySamplingFilter(sample_size=10, weight=None)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
    # The filter must return fewer compounds than an unfiltered run.
    # Results are non-deterministic, so no exact value can be asserted.
assert len(pk_target.compounds) < 1452
def test_similarity_sample_user_weight(pk_target):
"""Test similarity cutoff filter"""
def weight(T):
return T ** 4
_filter = SimilaritySamplingFilter(sample_size=10, weight=weight)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
    # The filter must return fewer compounds than an unfiltered run.
    # Results are non-deterministic, so no exact value can be asserted.
assert len(pk_target.compounds) < 1452
def test_similarity_sample_morgan(pk_target):
"""Test overwriting defaults"""
fingerprint_method = "Morgan"
fingerprint_args = {"radius": 2}
_filter = SimilaritySamplingFilter(
sample_size=10,
fingerprint_method=fingerprint_method,
fingerprint_args=fingerprint_args,
)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
    # The filter must return fewer compounds than an unfiltered run.
    # Results are non-deterministic, so no exact value can be asserted.
assert len(pk_target.compounds) < 1452 and len(pk_target.compounds) > 100
def test_similarity_sample_dice(pk_target):
"""Test overwriting defaults"""
fingerprint_method = "Morgan"
fingerprint_args = {"radius": 2}
similarity_method = "Dice"
_filter = SimilaritySamplingFilter(
sample_size=10,
fingerprint_method=fingerprint_method,
fingerprint_args=fingerprint_args,
similarity_method=similarity_method,
)
pk_target.filters.append(_filter)
pk_target.transform_all(processes=2, generations=2)
    # The filter must return fewer compounds than an unfiltered run.
    # Results are non-deterministic, so no exact value can be asserted.
assert len(pk_target.compounds) < 1452 and len(pk_target.compounds) > 100
def test_similarity_sample_multiprocess(pk_target):
"""Test similarity cutoff filter"""
def weight(T):
return T ** 4
_filter = SimilaritySamplingFilter(sample_size=10, weight=weight)
pk_target.react_targets = True
pk_target.filters.append(_filter)
pk_target.transform_all(processes=2, generations=2)
    # The filter must return fewer compounds than an unfiltered run.
    # Results are non-deterministic, so no exact value can be asserted.
assert len(pk_target.compounds) < 1452
def test_MCS_list(pk_target):
"""Test similarity cutoff filter"""
MCS_threshold = [0.1, 0.5]
_filter = MCSFilter(crit_mcs=MCS_threshold)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=2)
assert len(pk_target.compounds) == 340
@pytest.mark.skipif(not loaded_db, reason="No eQuilibrator DB found.")
def test_thermo_phys(pk_target):
"""Test thermo cutoff for physiological"""
_filter = ThermoFilter(physiological=True)
pk_target.filters.append(_filter)
pk_target.transform_all(generations=1)
assert True
def test_met_filter_mass(pk_target):
"""Test MetabolomicsFilter output without RT predictor."""
metabolomics_data_path = DATA_DIR / "test_metabolomics/test_metabolomics_data.csv"
met_filter = MetabolomicsFilter(
filter_name="test_metabolomics_filter",
met_data_name="test_metabolomics_data",
met_data_path=metabolomics_data_path,
possible_adducts=["[M+H]+", "[M-H]-"],
mass_tolerance=0.001,
)
pk_target.filters.append(met_filter)
pk_target.transform_all(generations=2)
gen1_cpds = [
pk_target.compounds[cpd]
for cpd in pk_target.compounds
if pk_target.compounds[cpd]["Generation"] == 1
]
assert len(gen1_cpds) == 1
assert gen1_cpds[0]["Matched_Peak_IDs"] == ["Test3"]
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/ImplementationGuide) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class ImplementationGuide(domainresource.DomainResource):
""" A set of rules about how FHIR is used.
A set of rules or how FHIR is used to solve a particular problem. This
resource is used to gather all the parts of an implementation guide into a
logical whole, and to publish a computable definition of all the parts.
"""
resource_name = "ImplementationGuide"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.binary = None
""" Image, css, script, etc..
List of `str` items. """
self.contact = None
""" Contact details of the publisher.
List of `ImplementationGuideContact` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date for this version of the Implementation Guide.
Type `FHIRDate` (represented as `str` in JSON). """
self.dependency = None
""" Another Implementation guide this depends on.
List of `ImplementationGuideDependency` items (represented as `dict` in JSON). """
self.description = None
""" Natural language description of the Implementation Guide.
Type `str`. """
self.experimental = None
""" If for testing purposes, not real usage.
Type `bool`. """
self.fhirVersion = None
""" FHIR Version this Implementation Guide targets.
Type `str`. """
self.global_fhir = None
""" Profiles that apply globally.
List of `ImplementationGuideGlobal` items (represented as `dict` in JSON). """
self.name = None
""" Informal name for this Implementation Guide.
Type `str`. """
self.package = None
""" Group of resources as used in .page.package.
List of `ImplementationGuidePackage` items (represented as `dict` in JSON). """
self.page = None
""" Page/Section in the Guide.
Type `ImplementationGuidePage` (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (Organization or individual).
Type `str`. """
self.status = None
""" draft | active | retired.
Type `str`. """
self.url = None
""" Absolute URL used to reference this Implementation Guide.
Type `str`. """
self.useContext = None
""" The implementation guide is intended to support these contexts.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.version = None
""" Logical id for this version of the Implementation Guide.
Type `str`. """
super(ImplementationGuide, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuide, self).elementProperties()
js.extend([
("binary", "binary", str, True, None, False),
("contact", "contact", ImplementationGuideContact, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("dependency", "dependency", ImplementationGuideDependency, True, None, False),
("description", "description", str, False, None, False),
("experimental", "experimental", bool, False, None, False),
("fhirVersion", "fhirVersion", str, False, None, False),
("global_fhir", "global", ImplementationGuideGlobal, True, None, False),
("name", "name", str, False, None, True),
("package", "package", ImplementationGuidePackage, True, None, True),
("page", "page", ImplementationGuidePage, False, None, True),
("publisher", "publisher", str, False, None, False),
("status", "status", str, False, None, True),
("url", "url", str, False, None, True),
("useContext", "useContext", codeableconcept.CodeableConcept, True, None, False),
("version", "version", str, False, None, False),
])
return js
from . import backboneelement
class ImplementationGuideContact(backboneelement.BackboneElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "ImplementationGuideContact"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of a individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(ImplementationGuideContact, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideContact, self).elementProperties()
js.extend([
("name", "name", str, False, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
])
return js
class ImplementationGuideDependency(backboneelement.BackboneElement):
""" Another Implementation guide this depends on.
Another implementation guide that this implementation depends on.
    Typically, an implementation guide uses value sets, profiles, etc. defined in
other implementation guides.
"""
resource_name = "ImplementationGuideDependency"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.type = None
""" reference | inclusion.
Type `str`. """
self.uri = None
""" Where to find dependency.
Type `str`. """
super(ImplementationGuideDependency, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideDependency, self).elementProperties()
js.extend([
("type", "type", str, False, None, True),
("uri", "uri", str, False, None, True),
])
return js
class ImplementationGuideGlobal(backboneelement.BackboneElement):
""" Profiles that apply globally.
A set of profiles that all resources covered by this implementation guide
must conform to.
"""
resource_name = "ImplementationGuideGlobal"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.profile = None
""" Profile that all resources must conform to.
Type `FHIRReference` referencing `StructureDefinition` (represented as `dict` in JSON). """
self.type = None
""" Type this profiles applies to.
Type `str`. """
super(ImplementationGuideGlobal, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuideGlobal, self).elementProperties()
js.extend([
("profile", "profile", fhirreference.FHIRReference, False, None, True),
("type", "type", str, False, None, True),
])
return js
class ImplementationGuidePackage(backboneelement.BackboneElement):
""" Group of resources as used in .page.package.
A logical group of resources. Logical groups can be used when building
pages.
"""
resource_name = "ImplementationGuidePackage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Human readable text describing the package.
Type `str`. """
self.name = None
""" Name used .page.package.
Type `str`. """
self.resource = None
""" Resource in the implementation guide.
List of `ImplementationGuidePackageResource` items (represented as `dict` in JSON). """
super(ImplementationGuidePackage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuidePackage, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("name", "name", str, False, None, True),
("resource", "resource", ImplementationGuidePackageResource, True, None, True),
])
return js
class ImplementationGuidePackageResource(backboneelement.BackboneElement):
""" Resource in the implementation guide.
A resource that is part of the implementation guide. Conformance resources
(value set, structure definition, conformance statements etc.) are obvious
candidates for inclusion, but any kind of resource can be included as an
example resource.
"""
resource_name = "ImplementationGuidePackageResource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.acronym = None
""" Short code to identify the resource.
Type `str`. """
self.description = None
""" Reason why included in guide.
Type `str`. """
self.exampleFor = None
""" Resource this is an example of (if applicable).
Type `FHIRReference` referencing `StructureDefinition` (represented as `dict` in JSON). """
self.name = None
""" Human Name for the resource.
Type `str`. """
self.purpose = None
""" example | terminology | profile | extension | dictionary | logical.
Type `str`. """
self.sourceReference = None
""" Location of the resource.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.sourceUri = None
""" Location of the resource.
Type `str`. """
super(ImplementationGuidePackageResource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuidePackageResource, self).elementProperties()
js.extend([
("acronym", "acronym", str, False, None, False),
("description", "description", str, False, None, False),
("exampleFor", "exampleFor", fhirreference.FHIRReference, False, None, False),
("name", "name", str, False, None, False),
("purpose", "purpose", str, False, None, True),
("sourceReference", "sourceReference", fhirreference.FHIRReference, False, "source", True),
("sourceUri", "sourceUri", str, False, "source", True),
])
return js
class ImplementationGuidePage(backboneelement.BackboneElement):
""" Page/Section in the Guide.
A page / section in the implementation guide. The root page is the
implementation guide home page.
"""
resource_name = "ImplementationGuidePage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.format = None
""" Format of the page (e.g. html, markdown, etc.).
Type `str`. """
self.kind = None
""" page | example | list | include | directory | dictionary | toc |
resource.
Type `str`. """
self.name = None
""" Short name shown for navigational assistance.
Type `str`. """
self.package = None
""" Name of package to include.
List of `str` items. """
self.page = None
""" Nested Pages / Sections.
List of `ImplementationGuidePage` items (represented as `dict` in JSON). """
self.source = None
""" Where to find that page.
Type `str`. """
self.type = None
""" Kind of resource to include in the list.
List of `str` items. """
super(ImplementationGuidePage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImplementationGuidePage, self).elementProperties()
js.extend([
("format", "format", str, False, None, False),
("kind", "kind", str, False, None, True),
("name", "name", str, False, None, True),
("package", "package", str, True, None, False),
("page", "page", ImplementationGuidePage, True, None, False),
("source", "source", str, False, None, True),
("type", "type", str, True, None, False),
])
return js
from . import codeableconcept
from . import contactpoint
from . import fhirdate
from . import fhirreference
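# A hedged usage sketch: build an ImplementationGuide from a JSON dictionary.
# The keys follow the element definitions above (url, name and status are
# among the non-optional elements); strict=False relaxes validation.
def _example_implementation_guide():  # pragma: no cover - example only
    return ImplementationGuide({
        "resourceType": "ImplementationGuide",
        "name": "Example Guide",
        "status": "draft",
        "url": "http://example.org/fhir/ImplementationGuide/example",
    }, strict=False)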
|
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not yet triggered (the block mined here triggers ACTIVE)
test that enforcement has triggered
'''
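# For reference: 536870913 == 0x20000001, i.e. the BIP9 "top bits" 0x20000000
# plus signalling bit 0. Blocks generated with nVersion=4 lack the top bits
# and therefore do not signal for any BIP9 deployment.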
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1
# using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # nVersion=4 (not signalling)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # nVersion=4 (not signalling)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # nVersion=4 (not signalling)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # nVersion=4 (not signalling)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 4
        # 143 more blocks (waiting period-1); signalling no longer matters once LOCKED_IN
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.block_store.close()
stop_nodes(self.nodes)
wait_bitcoinds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.block_store = BlockStore(self.options.tmpdir)
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
'''Modify the signature in vin 0 of the tx to fail CSV
        Prepends -1 CSV DROP to the scriptSig.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
        '''Modify the nSequence to make it fail once the sequence lock rule is activated (high timespan)
'''
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
        '''Modify the nLockTime to make it fail once the MTP rule is activated
'''
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monitoring."""
# pylint: disable=invalid-name
# TODO(ochang): Remove V3 from names once all metrics are migrated to
# stackdriver.
import bisect
import collections
import functools
import itertools
import re
import threading
import time
import six
try:
from google.cloud import monitoring_v3
except (ImportError, RuntimeError):
monitoring_v3 = None
from google.api_core import exceptions
from google.api_core import retry
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import compute_metadata
from clusterfuzz._internal.google_cloud_utils import credentials
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
CUSTOM_METRIC_PREFIX = 'custom.googleapis.com/'
FLUSH_INTERVAL_SECONDS = 10 * 60 # 10 minutes.
RETRY_DEADLINE_SECONDS = 5 * 60 # 5 minutes.
INITIAL_DELAY_SECONDS = 16
MAXIMUM_DELAY_SECONDS = 2 * 60 # 2 minutes.
MAX_TIME_SERIES_PER_CALL = 200
_retry_wrap = retry.Retry(
predicate=retry.if_exception_type((
exceptions.Aborted,
exceptions.DeadlineExceeded,
exceptions.ResourceExhausted,
exceptions.ServerError,
exceptions.ServiceUnavailable,
)),
initial=INITIAL_DELAY_SECONDS,
maximum=MAXIMUM_DELAY_SECONDS,
deadline=RETRY_DEADLINE_SECONDS)
class _MockMetric(object):
"""Mock metric object, used for when monitoring isn't available."""
def _mock_method(self, *args, **kwargs): # pylint: disable=unused-argument
pass
def __getattr__(self, _):
return self._mock_method
class _FlusherThread(threading.Thread):
"""Flusher thread."""
def __init__(self):
super(_FlusherThread, self).__init__()
self.daemon = True
self.stop_event = threading.Event()
def run(self):
"""Run the flusher thread."""
create_time_series = _retry_wrap(_monitoring_v3_client.create_time_series)
project_path = _monitoring_v3_client.project_path(
utils.get_application_id())
while True:
try:
if self.stop_event.wait(FLUSH_INTERVAL_SECONDS):
return
time_series = []
end_time = time.time()
for metric, labels, start_time, value in _metrics_store.iter_values():
if (metric.metric_kind ==
monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE):
start_time = end_time
series = monitoring_v3.types.TimeSeries()
metric.monitoring_v3_time_series(series, labels, start_time, end_time,
value)
time_series.append(series)
if len(time_series) == MAX_TIME_SERIES_PER_CALL:
create_time_series(project_path, time_series)
time_series = []
if time_series:
create_time_series(project_path, time_series)
except Exception:
logs.log_error('Failed to flush metrics.')
def stop(self):
self.stop_event.set()
self.join()
_StoreValue = collections.namedtuple(
'_StoreValue', ['metric', 'labels', 'start_time', 'value'])
class _MetricsStore(object):
"""In-process metrics store."""
def __init__(self):
self._store = {}
self._lock = threading.RLock()
def _get_key(self, metric_name, labels):
"""Get the key used for storing values."""
if labels:
normalized_labels = tuple(sorted(six.iteritems(labels)))
else:
normalized_labels = None
return (metric_name, normalized_labels)
def iter_values(self):
with self._lock:
for value in six.itervalues(self._store):
yield value
def get(self, metric, labels):
"""Get the stored value for the metric."""
with self._lock:
key = self._get_key(metric.name, labels)
return self._store[key]
def put(self, metric, labels, value):
"""Store new value for the metric."""
with self._lock:
key = self._get_key(metric.name, labels)
if key in self._store:
start_time = self._store[key].start_time
else:
start_time = time.time()
self._store[key] = _StoreValue(metric, labels, start_time, value)
def increment(self, metric, labels, delta):
"""Increment a value by |delta|."""
with self._lock:
key = self._get_key(metric.name, labels)
if key in self._store:
start_time = self._store[key].start_time
value = self._store[key].value + delta
else:
start_time = time.time()
value = metric.default_value + delta
self._store[key] = _StoreValue(metric, labels, start_time, value)
def reset_for_testing(self):
"""Reset all data. Used for tests."""
with self._lock:
self._store.clear()
class _Field(object):
"""_Field is the base class used for field specs."""
def __init__(self, name):
self.name = name
@property
def value_type(self):
raise NotImplementedError
class StringField(_Field):
"""StringField spec."""
@property
def value_type(self):
return monitoring_v3.enums.LabelDescriptor.ValueType.STRING
class BooleanField(_Field):
"""BooleanField spec."""
@property
def value_type(self):
return monitoring_v3.enums.LabelDescriptor.ValueType.BOOL
class IntegerField(_Field):
"""IntegerField spec."""
@property
def value_type(self):
return monitoring_v3.enums.LabelDescriptor.ValueType.INT64
class Metric(object):
"""Base metric class."""
def __init__(self, name, description, field_spec):
self.name = name
self.description = description
self.field_spec = field_spec or []
@property
def value_type(self):
raise NotImplementedError
@property
def metric_kind(self):
raise NotImplementedError
@property
def default_value(self):
raise NotImplementedError
def _set_value(self, point, value):
raise NotImplementedError
def get(self, labels=None):
"""Return the current value for the labels. Used for testing."""
try:
return _metrics_store.get(self, labels).value
except KeyError:
return self.default_value
def monitoring_v3_metric(self, metric, labels=None):
"""Get the monitoring_v3 Metric."""
metric.type = CUSTOM_METRIC_PREFIX + self.name
if not labels:
return metric
for key, value in six.iteritems(labels):
metric.labels[key] = str(value)
# Default labels.
bot_name = environment.get_value('BOT_NAME')
metric.labels['region'] = _get_region(bot_name)
return metric
def monitoring_v3_metric_descriptor(self, descriptor):
"""Get the monitoring_v3 MetricDescriptor."""
descriptor.name = self.name
descriptor.type = CUSTOM_METRIC_PREFIX + self.name
descriptor.metric_kind = self.metric_kind
descriptor.value_type = self.value_type
descriptor.description = self.description
for field in itertools.chain(DEFAULT_FIELDS, self.field_spec):
label_descriptor = descriptor.labels.add()
label_descriptor.key = field.name
label_descriptor.value_type = field.value_type
return descriptor
def monitoring_v3_time_series(self, time_series, labels, start_time, end_time,
value):
"""Get the TimeSeries corresponding to the metric."""
self.monitoring_v3_metric(time_series.metric, labels)
time_series.resource.CopyFrom(_monitored_resource)
time_series.metric_kind = self.metric_kind
time_series.value_type = self.value_type
point = time_series.points.add()
_time_to_timestamp(point.interval.start_time, start_time)
_time_to_timestamp(point.interval.end_time, end_time)
self._set_value(point.value, value)
return time_series
class _CounterMetric(Metric):
"""Counter metric."""
@property
def value_type(self):
return monitoring_v3.enums.MetricDescriptor.ValueType.INT64
@property
def metric_kind(self):
return monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE
@property
def default_value(self):
return 0
def increment(self, labels=None):
self.increment_by(1, labels=labels)
def increment_by(self, count, labels=None):
_metrics_store.increment(self, labels, count)
def _set_value(self, point, value):
"""Get Point."""
point.int64_value = value
class _GaugeMetric(Metric):
"""Gauge metric."""
@property
def value_type(self):
return monitoring_v3.enums.MetricDescriptor.ValueType.INT64
@property
def metric_kind(self):
return monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE
@property
def default_value(self):
return 0
def set(self, value, labels=None):
_metrics_store.put(self, labels, value)
def _set_value(self, point, value):
"""Get Point."""
point.int64_value = value
class _Bucketer(object):
"""Bucketer."""
def bucket_for_value(self, value):
"""Get the bucket index for the given value."""
return bisect.bisect(self._lower_bounds, value) - 1
@property
def num_buckets(self):
return len(self._lower_bounds)
class FixedWidthBucketer(_Bucketer):
"""Fixed width bucketer."""
def __init__(self, width, num_finite_buckets=100):
self.width = width
self.num_finite_buckets = num_finite_buckets
    # [-Inf, 0), [0, width), [width, 2*width), ... , [n*width, Inf)
self._lower_bounds = [float('-Inf')]
self._lower_bounds.extend(
[width * i for i in range(num_finite_buckets + 1)])
class GeometricBucketer(_Bucketer):
"""Geometric bucketer."""
def __init__(self, growth_factor=10**0.2, num_finite_buckets=100, scale=1.0):
self.growth_factor = growth_factor
self.num_finite_buckets = num_finite_buckets
self.scale = scale
# [-Inf, scale), [scale, scale*growth),
# [scale*growth^i, scale*growth^(i+1)), ..., [scale*growth^n, Inf)
self._lower_bounds = [float('-Inf')]
self._lower_bounds.extend(
[scale * growth_factor**i for i in range(num_finite_buckets + 1)])
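# A hedged sketch of how values map to bucket indices: with
# FixedWidthBucketer(width=10), -5 falls in bucket 0 ([-Inf, 0)), 0 in
# bucket 1 ([0, 10)) and 25 in bucket 3 ([20, 30)).
def _example_bucketer():  # pragma: no cover - example only
  b = FixedWidthBucketer(width=10)
  return [b.bucket_for_value(v) for v in (-5, 0, 25)]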
class _Distribution(object):
"""Holds a distribution."""
def __init__(self, bucketer):
self.bucketer = bucketer
self.buckets = [0 for _ in range(bucketer.num_buckets)]
self.sum = 0
self.count = 0
def add(self, value):
self.buckets[self.bucketer.bucket_for_value(value)] += 1
self.count += 1
self.sum += value
return self
__add__ = add
def monitoring_v3_distribution(self, distribution):
"""Set the monitoring_v3 Distribution value."""
distribution.count = self.count
if self.count:
distribution.mean = float(self.sum) / self.count
else:
distribution.mean = 0.0
if isinstance(self.bucketer, FixedWidthBucketer):
distribution.bucket_options.linear_buckets.offset = 0
distribution.bucket_options.linear_buckets.width = self.bucketer.width
distribution.bucket_options.linear_buckets.num_finite_buckets = (
self.bucketer.num_finite_buckets)
else:
assert isinstance(self.bucketer, GeometricBucketer)
distribution.bucket_options.exponential_buckets.scale = (
self.bucketer.scale)
distribution.bucket_options.exponential_buckets.growth_factor = (
self.bucketer.growth_factor)
distribution.bucket_options.exponential_buckets.num_finite_buckets = (
self.bucketer.num_finite_buckets)
distribution.bucket_counts.extend(self.buckets)
class _CumulativeDistributionMetric(Metric):
"""Cumulative distribution metric."""
def __init__(self, name, description, bucketer, field_spec=None):
super(_CumulativeDistributionMetric, self).__init__(
name, description=description, field_spec=field_spec)
self.bucketer = bucketer
@property
def value_type(self):
return monitoring_v3.enums.MetricDescriptor.ValueType.DISTRIBUTION
@property
def metric_kind(self):
return monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE
@property
def default_value(self):
return _Distribution(self.bucketer)
def add(self, value, labels=None):
_metrics_store.increment(self, labels, value)
def _set_value(self, point, value):
value.monitoring_v3_distribution(point.distribution_value)
# Global state.
_metrics_store = _MetricsStore()
_monitoring_v3_client = None
_flusher_thread = None
_monitored_resource = None
# Add fields very conservatively here. There is a limit of 10 labels per metric
# descriptor, and metrics should be low in cardinality. That is, only add fields
# which have a small number of possible values.
DEFAULT_FIELDS = [
StringField('region'),
]
def check_module_loaded(module):
"""Used for mocking."""
return module is not None
def stub_unavailable(module):
"""Decorator to stub out functions on failed imports."""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if check_module_loaded(module):
return func(*args, **kwargs)
return _MockMetric()
return wrapper
return decorator
def _initialize_monitored_resource():
"""Monitored resources."""
global _monitored_resource
_monitored_resource = monitoring_v3.types.MonitoredResource()
# TODO(ochang): Use generic_node when that is available.
_monitored_resource.type = 'gce_instance'
# The project ID must be the same as the one we write metrics to, not the ID
# where the instance lives.
_monitored_resource.labels['project_id'] = utils.get_application_id()
  # Use the bot name as the instance_id, as that's more useful to us.
_monitored_resource.labels['instance_id'] = environment.get_value('BOT_NAME')
if compute_metadata.is_gce():
# Returned in the form projects/{id}/zones/{zone}
zone = compute_metadata.get('instance/zone').split('/')[-1]
_monitored_resource.labels['zone'] = zone
else:
# Default zone for instances not on GCE.
_monitored_resource.labels['zone'] = 'us-central1-f'
def _time_to_timestamp(timestamp, time_seconds):
"""Convert result of time.time() to Timestamp."""
timestamp.seconds = int(time_seconds)
timestamp.nanos = int((time_seconds - timestamp.seconds) * 10**9)
def initialize():
"""Initialize if monitoring is enabled for this bot."""
global _monitoring_v3_client
global _flusher_thread
if environment.get_value('LOCAL_DEVELOPMENT'):
return
if not local_config.ProjectConfig().get('monitoring.enabled'):
return
if check_module_loaded(monitoring_v3):
_initialize_monitored_resource()
_monitoring_v3_client = monitoring_v3.MetricServiceClient(
credentials=credentials.get_default()[0])
_flusher_thread = _FlusherThread()
_flusher_thread.start()
def stop():
"""Stops monitoring and cleans up (only if monitoring is enabled)."""
if _flusher_thread:
_flusher_thread.stop()
def metrics_store():
"""Get the per-process metrics store."""
return _metrics_store
def _get_region(bot_name):
"""Get bot region."""
try:
regions = local_config.MonitoringRegionsConfig()
except errors.BadConfigError:
return 'unknown'
for pattern in regions.get('patterns'):
if re.match(pattern['pattern'], bot_name):
return pattern['name']
return 'unknown'
@stub_unavailable(monitoring_v3)
def CounterMetric(name, description, field_spec):
"""Build _CounterMetric."""
return _CounterMetric(name, field_spec=field_spec, description=description)
@stub_unavailable(monitoring_v3)
def GaugeMetric(name, description, field_spec):
"""Build _CounterMetric."""
return _GaugeMetric(name, field_spec=field_spec, description=description)
@stub_unavailable(monitoring_v3)
def CumulativeDistributionMetric(name, description, bucketer, field_spec):
"""Build _CounterMetric."""
return _CumulativeDistributionMetric(
name, description=description, bucketer=bucketer, field_spec=field_spec)
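# A hedged usage sketch; the metric name and field below are illustrative.
# When monitoring_v3 failed to import, the factories above return _MockMetric,
# so these calls degrade to safe no-ops.
def _example_metric_usage():  # pragma: no cover - example only
  tasks_count = CounterMetric(
      'task/count',
      description='Number of tasks executed.',
      field_spec=[StringField('task_name')])
  tasks_count.increment(labels={'task_name': 'fuzz'})
  return tasks_count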
|
|
# -*- coding: utf-8 -*-
import os
from anima import logger, log_file_handler
from anima.recent import RecentFileManager
class DCCBase(object):
"""Connects the DCC to Anima Pipeline.
In Anima Pipeline, a DCC is a host application like Maya, Nuke, Houdini etc.
    Generally a GUI for the end user is given a DCCBase-derived class instance,
    which lets the Qt GUI open, save, import or export a Version without
    knowing the details of the DCC.
The DCC object supplies **methods** like ``open``, ``save``,
``export``, ``import`` or ``reference``. The main duty of the DCC
object is to introduce the host application (Maya, Houdini, Nuke, etc.) to
    Stalker and let it open, save, export, import or reference a file.
    It is the pipeline developer's duty to create the DCC classes for the
applications used in the studio by instantiating this class and overriding
the methods as necessary. You can find good examples in `Anima Tools`_
which is a Python package developed in `Anima Istanbul`_.
.. _Anima Tools: https://pypi.python.org/pypi/anima
    .. _Anima Istanbul: http://www.animaistanbul.com
    Here is a brief example of creating a DCC class for a generic program::
        from anima.dcc.base import DCCBase
class MyProgramEnv(DCCBase):
\"""This is a class which will be used by the UI
\"""
            def open(self):
                \"""uses the program's own Python API to open a version of an
asset
\"""
# do anything that needs to be done before opening the file
my_programs_own_python_api.open(filepath=self.version.full_path)
            def save(self):
                \"""uses the program's own Python API to save the current file
as a new version.
\"""
# do anything that needs to be done before saving the file
my_programs_own_python_api.save(filepath=self.version.full_path)
# do anything that needs to be done after saving the file
and that is it.
    The DCC class by default has a property called ``version``, holding
    the currently open Version. It is None for a new scene and a
:class:`~stalker.models.version.Version` instance in any other case.
"""
name = "DCCBase"
representations = ["Base"]
has_publishers = False
allow_publish_on_export = False
extensions = []
project_structure = []
def __init__(self, name="", version=None):
self._name = name
self._version = version
def __str__(self):
"""the string representation of the DCC"""
return self._name
@property
def version(self):
"""returns the current Version instance which is open in the DCC"""
return self.get_current_version()
@property
def name(self):
"""returns the DCC name"""
return self._name
@name.setter
def name(self, name):
"""sets the DCC name"""
self._name = name
def save_as(self, version, run_pre_publishers=True):
"""The save as action of this DCC. It should save the current
scene or file to the given version.full_path
:param version: stalker.models.version.Version instance.
:param bool run_pre_publishers: Run pre publishers of this DCC
or not. Default value is True
"""
raise NotImplementedError("save_as is not implemented")
def export_as(self, version):
"""Exports the contents of the open document as the given version.
:param version: A :class:`~stalker.models.version.Version` instance
holding the desired version.
"""
raise NotImplementedError("export_as is not implemented")
def open(
self,
version,
force=False,
representation=None,
reference_depth=0,
skip_update_check=False,
):
"""the open action"""
raise NotImplementedError("open is not implemented")
def import_(self, version):
"""the import action"""
raise NotImplementedError("import_ is not implemented")
def reference(self, version, use_namespace=True):
"""the reference action"""
raise NotImplementedError("reference is not implemented")
def trim_repo_path(self, path):
"""Trims the repository path value from the given path
        :param path: The path to be trimmed
:return: str
"""
# get the repo first
repo = self.find_repo(path)
if not repo:
return path
# then try to trim the path
if path.startswith(repo.path):
return path[len(repo.path) :]
elif path.startswith(repo.windows_path):
return path[len(repo.windows_path) :]
elif path.startswith(repo.linux_path):
return path[len(repo.linux_path) :]
elif path.startswith(repo.osx_path):
return path[len(repo.osx_path) :]
return path
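    # For illustration (hypothetical repo): with repo.path == "/mnt/repo/",
    # trim_repo_path("/mnt/repo/PRJ/scene.ma") returns "PRJ/scene.ma"; paths
    # outside every known repository are returned unchanged.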
@classmethod
def find_repo(cls, path):
"""returns the repository from the given path
:param str path: path in a repository
:return: stalker.models.repository.Repository
"""
# first find the repository
from stalker import Repository
return Repository.find_repo(path)
def get_versions_from_path(self, path):
"""Finds Version instances from the given path value.
Finds and returns the :class:`~stalker.models.version.Version`
instances from the given path value.
Returns an empty list if it can't find any matching.
This method is different than
:meth:`~anima.dcc.base.DCCBase.get_version_from_full_path`
because it returns a list of
:class:`~stalker.models.version.Version` instances which are
residing in that path. The list is ordered by the ``id``\ s of the
instances.
:param path: A path which has possible
:class:`~stalker.models.version.Version` instances.
:return: A list of :class:`~stalker.models.version.Version` instances.
"""
if not path:
return []
# convert '\\' to '/'
path = os.path.normpath(path).replace("\\", "/")
from stalker import Repository
os_independent_path = Repository.to_os_independent_path(path)
logger.debug("os_independent_path: %s" % os_independent_path)
from stalker import Version
from stalker.db.session import DBSession
# try to get all versions with that info
with DBSession.no_autoflush:
versions = Version.query.filter(
Version.full_path.startswith(os_independent_path)
).all()
return versions
@classmethod
def get_version_from_full_path(cls, full_path):
"""Finds the Version instance from the given full_path value.
Finds and returns a :class:`~stalker.models.version.Version` instance
from the given full_path value.
Returns None if it can't find any matching.
:param full_path: The full_path of the desired
:class:`~stalker.models.version.Version` instance.
:return: :class:`~stalker.models.version.Version`
"""
logger.debug("full_path: %s" % full_path)
# convert '\\' to '/'
full_path = os.path.normpath(os.path.expandvars(full_path)).replace("\\", "/")
# trim repo path
from stalker import Repository, Version
os_independent_path = Repository.to_os_independent_path(full_path)
# try to get a version with that info
logger.debug("getting a version with path: %s" % full_path)
version = Version.query.filter(Version.full_path == os_independent_path).first()
logger.debug("version: %s" % version)
return version
def get_current_version(self):
"""Returns the current Version instance from the DCC.
:returns: :class:`~stalker.models.version.Version` instance or None
"""
raise NotImplementedError("get_current_version is not implemented")
def append_to_recent_files(self, path):
"""appends the given path to the recent files list"""
# add the file to the recent file list
rfm = RecentFileManager()
rfm.add(self.name, path)
def get_version_from_recent_files(self):
"""This will try to create a :class:`.Version` instance by looking at
the recent files list.
It will return None if it can not find one.
:return: :class:`.Version`
"""
version = None
logger.debug("trying to get the version from recent file list")
# read the fileName from recent files list
        # try to get a valid asset file, starting from the last recent file
rfm = RecentFileManager()
try:
recent_files = rfm[self.name]
except KeyError:
logger.debug("no recent files")
recent_files = None
if recent_files is not None:
for recent_file in recent_files:
version = self.get_version_from_full_path(recent_file)
if version is not None:
break
logger.debug("version from recent files is: %s" % version)
return version
def get_last_version(self):
"""Returns the last opened Version instance from the DCC.
* It first looks at the current open file full path and tries to match
it with a Version instance.
* Then searches for the recent files list.
        * If none of the recent files match, it returns the Version instance
          with the highest id which has the current workspace path in its path
        * If it still can't find any Version instance, it returns None
:returns: :class:`~stalker.models.version.Version` instance or None.
"""
version = self.get_current_version()
# read the recent file list
if version is None:
version = self.get_version_from_recent_files()
return version
def get_project(self):
"""returns the current project from DCC"""
raise NotImplementedError("get_project is not implemented")
def set_project(self, version):
"""Sets the project to the given Versions project.
:param version: A :class:`~stalker.models.version.Version`.
"""
raise NotImplementedError("set_project is not implemented")
def update_version_inputs(self, parent_ref=None):
"""updates the references list of the current version
        :param parent_ref: the parent reference; if given, the Version
            instance is resolved from parent_ref.path instead of the
            currently open version.
"""
logger.debug("parent_ref: %s" % parent_ref)
logger.debug("get a version")
if not parent_ref:
logger.debug("got no parent_ref")
version = self.get_current_version()
else:
logger.debug("have a parent_ref")
version = self.get_version_from_full_path(parent_ref.path)
if version:
logger.debug("got a version: %s" % version.absolute_full_path)
# use the original version if it is a Repr version
from anima.representation import Representation
if Representation.repr_separator in version.take_name and version.parent:
version = version.parent
logger.debug(
"this is a representation switching to its parent: %s" % version
)
# update the reference list
referenced_versions = self.get_referenced_versions(parent_ref)
version.inputs = referenced_versions
# commit data to the database
from stalker.db.session import DBSession
DBSession.add(version)
DBSession.commit()
def deep_version_inputs_update(self):
"""Updates the inputs of the references of the current scene"""
raise NotImplementedError("deep_version_inputs_update is not implemented")
def check_referenced_versions(self, pdm=None):
"""Deeply checks all the references in the scene and returns a
dictionary which has three keys called 'leave', 'update' and 'create'.
        Each of these keys corresponds to a list of
        :class:`~stalker.models.version.Version`\ s. The list under the
        'leave' key holds the Versions referenced (or deeply referenced) in
        the current scene which don't need to be changed.
        The list under the 'update' key holds Versions that need to be
        updated to a newer version which already exists.
        The list under the 'create' key holds Versions whose references need
        to be updated to the newer versions, and which therefore need a new
        version themselves.
All the Versions in the list are sorted from the deepest to shallowest
reference, so processing the list from 0th element to nth will always
guarantee up to date info for the currently processed Version instance.
Uses the top level references to get a Stalker Version instance and
then tracks all the changes from these Version instances.
:return: dictionary
"""
if not pdm:
from anima.ui.progress_dialog import ProgressDialogManager
pdm = ProgressDialogManager()
caller = pdm.register(
3, "%s.check_referenced_versions() prepare data" % self.__class__.__name__
)
# deeply get which file is referencing which other files
self.deep_version_inputs_update()
if caller:
caller.step()
from anima.dcc import empty_reference_resolution
reference_resolution = empty_reference_resolution(
root=self.get_referenced_versions()
)
if caller:
caller.step()
# reverse walk in DFS
dfs_version_references = []
version = self.get_current_version()
if not version:
return reference_resolution
for v in version.walk_inputs():
dfs_version_references.append(v)
if caller:
caller.step()
# pop the first element which is the current scene
dfs_version_references.pop(0)
caller.end_progress()
# register a new caller
caller = pdm.register(
len(dfs_version_references),
"%s.check_referenced_versions()" % self.__class__.__name__,
)
# iterate back in the list
for v in reversed(dfs_version_references):
# check inputs first
to_be_updated_list = []
for ref_v in v.inputs:
if not ref_v.is_latest_published_version():
to_be_updated_list.append(ref_v)
if to_be_updated_list:
action = "create"
# check if there is a new published version of this version
# that is using all the updated versions of the references
latest_published_version = v.latest_published_version
if latest_published_version and not v.is_latest_published_version():
# so there is a new published version
                    # check if its children need any update
                    # and the updated child versions are already
                    # referenced by this published version
if all(
[
ref_v.latest_published_version
in latest_published_version.inputs
for ref_v in to_be_updated_list
]
):
# so all new versions are referenced to this published
# version, just update to this latest published version
action = "update"
else:
# not all references are in the inputs
# so we need to create a new version as usual
# and update the references to the latest versions
action = "create"
else:
# nothing needs to be updated,
# so check if this version has a new version,
# also there could be no reference under this referenced
# version
if v.is_latest_published_version():
# do nothing
action = "leave"
else:
# update to latest published version
action = "update"
# before setting the action check all the inputs in
# resolution_dictionary, if any of them are update, or create
# then set this one to 'create'
if any(
rev_v in reference_resolution["update"]
or rev_v in reference_resolution["create"]
for rev_v in v.inputs
):
action = "create"
# so append this v to the related action list
reference_resolution[action].append(v)
# from stalker import Version
# assert isinstance(v, Version)
caller.step(message=v.nice_name)
caller.end_progress()
return reference_resolution
def get_referenced_versions(self, parent_ref=None):
"""Returns the :class:`~stalker.models.version.Version` instances which
        are referenced into the current scene
:param parent_ref: The parent reference node.
:returns: list of :class:`~stalker.models.version.Version` instances.
"""
raise NotImplementedError("get_referenced_versions is not implemented")
def update_versions(self, reference_resolution):
"""Updates the versions to the latest ones.
:param reference_resolution: A dictionary with keys 'leave', 'update'
and 'create' with a list of :class:`~stalker.models.version.Version`
instances in each of them. Only 'update' key is used and if the
Version instance is in the 'update' list the reference is updated to
the latest version.
"""
raise NotImplementedError("update_versions is not implemented")
def get_frame_range(self):
"""Returns the frame range from the DCC
:returns: a tuple of integers containing the start and end frame
numbers
"""
raise NotImplementedError("get_frame_range is not implemented")
def set_frame_range(self, start_frame=0, end_frame=100, adjust_frame_range=False):
"""Sets the frame range in the DCC to the given start and end
frames
"""
raise NotImplementedError("set_frame_range is not implemented")
def get_fps(self):
"""Returns the frame rate of this current DCC"""
raise NotImplementedError("get_fps is not implemented")
def set_fps(self, fps=25):
"""Sets the frame rate of the DCC. The default value is 25.
:param float fps: The FPS of the current DCC. Defaults to 25.
:return:
"""
raise NotImplementedError("set_fps is not implemented")
def has_extension(self, filename):
"""Returns True if the given file names extension is in the extensions
list false otherwise.
accepts:
* a full path with extension or not
* a file name with extension or not
* an extension with a dot on the start or not
:param filename: A string containing the filename
"""
if filename is None:
return False
return filename.split(".")[-1].lower() in self.extensions
def load_referenced_versions(self):
"""loads all the references"""
raise NotImplementedError("load_referenced_versions is not implemented")
def replace_version(self, source_version, target_version):
"""Replaces the source_version with the target_version
:param source_version: A
:class:`~stalker.models.version.Version` instance holding the version
to be replaced
:param target_version: A
:class:`~stalker.models.version.Version` instance holding the new
version replacing the source one.
"""
raise NotImplementedError("replace_version is not implemented")
def replace_external_paths(self, mode=0):
"""Replaces the external paths (which are not starting with the
environment variable) with a proper path. The mode controls if the
resultant path should be absolute or relative to the project dir.
:param mode: Controls the resultant path is absolute or relative.
mode 0: absolute (a path which starts with $REPO)
mode 1: relative (to project path)
:return:
"""
raise NotImplementedError("replace_external_paths is not implemented")
def reference_filters(self, version, options):
"""Checks the given version against the given options
:param options: a dictionary object showing the reference options
:return:
"""
pass
@classmethod
def get_significant_name(
cls, version, include_project_code=True, include_version_number=True
):
"""returns a significant name starting from the closest parent which is
an Asset, Shot or Sequence and includes the ``Project.code``
:param version: The Stalker Version instance.
:param bool include_project_code: Include project code.
:param bool include_version_number: Include version number
:rtype : str
"""
if include_project_code:
sig_name = "%s_%s" % (version.task.project.code, version.nice_name)
else:
sig_name = version.nice_name
if include_version_number:
sig_name = "%s_v%03d" % (sig_name, version.version_number)
return sig_name
@classmethod
def local_backup_path(cls):
"""returns the local backup path
:return:
"""
# use the user home directory .stalker_local_backup
from anima import defaults
return os.path.normpath(
os.path.expanduser("%s/projects_backup" % defaults.local_cache_folder)
).replace("\\", "/")
def create_project_structure(self, version):
"""creates the project structure
:param version: Stalker version
"""
import os
project_path = version.absolute_path
for path in self.project_structure:
# TODO: use exist_ok=True for Python 3.x
try:
os.makedirs(os.path.normpath(os.path.join(project_path, path)))
except OSError:
# exists_ok
pass
@classmethod
def create_local_copy(cls, version):
"""Creates a local copy of the given version
:param version:
:return:
"""
output_path = os.path.join(
cls.local_backup_path(), version.absolute_path.replace(":", "")
).replace("\\", "/")
output_full_path = os.path.join(
cls.local_backup_path(), version.absolute_full_path.replace(":", "")
).replace("\\", "/")
# do nothing if the version and the copy are on the same drive
# (i.e. do not duplicate the file)
if len(os.path.commonprefix([output_full_path, version.absolute_full_path])):
logger.debug(
"Local copy file: %s is on the same drive with the source "
"file: %s" % (output_full_path, version.absolute_full_path)
)
logger.debug("Not duplicating it!")
return
# create intermediate folders
try:
os.makedirs(output_path)
except OSError:
# already exists
pass
import shutil
try:
shutil.copy(version.absolute_full_path, output_full_path)
except IOError:
# no space left
pass
logger.debug("created copy to: %s" % output_full_path)
@classmethod
def get_shot(cls, version):
from stalker import Shot
for task in version.task.parents:
if isinstance(task, Shot):
return task
def is_shot_related_version(self, version):
"""Returns true if this is a shot related version
:param version:
:return:
"""
return self.get_shot(version) is not None
def set_render_resolution(self, width, height, pixel_aspect=1.0):
"""Sets the render resolution for the current DCC
:param int width: The width of the resolution
:param int height: The height of the resolution
:param float pixel_aspect: The pixel aspect ratio, defaults to 1.0.
:return:
"""
raise NotImplementedError("set_render_resolution is not implemented")
class Filter(object):
"""A filter class filters given options against the given versions related
task type.
:param version: :class:`~stalker.models.version.Version` instance. The
related :class:`~stalker.models.task.Task`\ s
:attr:`~stalker.models.task.Task.type` attribute is key here. It defines
which filter to apply to.
:param options: A dictionary with keys are the name of the option and the
value is the value of that option.
"""
def __init__(self):
pass
class OpenFilter(Filter):
"""A filter for Open operations"""
pass
class ReferenceFilter(Filter):
"""A filter for Reference operations"""
pass
class ImportFilter(Filter):
"""A filter for Import operations"""
pass
class ExportFilter(Filter):
"""A filter for Export operations"""
pass
class SaveAsFilter(Filter):
"""A Filter for Save As operations"""
pass
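
# A hedged, minimal sketch (not part of the module above) of how client code
# is expected to consume check_referenced_versions() together with
# update_versions(). `dcc` stands for any concrete subclass instance of the
# DCC base class above; only the 'leave'/'update'/'create' contract of the
# reference_resolution dictionary comes from the code above.
def _apply_reference_resolution(dcc):
    resolution = dcc.check_referenced_versions()
    # references in the 'update' list can be switched in place to their
    # latest published versions
    dcc.update_versions(resolution)
    # references in the 'create' list need a new version published first
    for v in resolution["create"]:
        print("needs a new version:", v.nice_name)
    return resolution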
|
|
# Copyright 2016 Letv Cloud Computing
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:floating_ips:index')
INDEX_TEMPLATE = 'horizon/common/_data_table_view.html'
class AdminFloatingIpViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('server_list', ),
api.keystone: ('tenant_list', ),
api.neutron: ('network_list',
'tenant_floating_ip_list',)})
def test_index(self):
# Use neutron test data
fips = self.floating_ips.list()
servers = self.servers.list()
tenants = self.tenants.list()
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(fips)
api.nova.server_list(IsA(http.HttpRequest), search_opts={'all_tenants': True}) \
.AndReturn([servers, False])
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
params = {"router:external": True}
api.neutron.network_list(IsA(http.HttpRequest), **params) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
self.assertIn('floating_ips_table', res.context)
floating_ips_table = res.context['floating_ips_table']
floating_ips = floating_ips_table.data
self.assertEqual(len(floating_ips), 2)
row_actions = floating_ips_table.get_row_actions(floating_ips[0])
self.assertEqual(len(row_actions), 1)
row_actions = floating_ips_table.get_row_actions(floating_ips[1])
self.assertEqual(len(row_actions), 2)
@test.create_stubs({api.neutron: ('tenant_floating_ip_get',
'network_get', )})
def test_floating_ip_detail_get(self):
fip = self.floating_ips.first()
network = self.networks.first()
api.neutron.tenant_floating_ip_get(
IsA(http.HttpRequest), fip.id).AndReturn(fip)
api.neutron.network_get(
IsA(http.HttpRequest), fip.pool).AndReturn(network)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:floating_ips:detail',
args=[fip.id]))
self.assertTemplateUsed(res,
'admin/floating_ips/detail.html')
self.assertEqual(res.context['floating_ip'].ip, fip.ip)
@test.create_stubs({api.neutron: ('tenant_floating_ip_get',)})
def test_floating_ip_detail_exception(self):
fip = self.floating_ips.first()
# Only supported by neutron, so raise a neutron exception
api.neutron.tenant_floating_ip_get(
IsA(http.HttpRequest),
fip.id).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:floating_ips:detail',
args=[fip.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('tenant_floating_ip_list', )})
def test_index_no_floating_ips(self):
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn([])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
@test.create_stubs({api.neutron: ('tenant_floating_ip_list', )})
def test_index_error(self):
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True) \
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
@test.create_stubs({api.neutron: ('network_list',),
api.keystone: ('tenant_list',)})
def test_admin_allocate_get(self):
pool = self.networks.first()
tenants = self.tenants.list()
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
search_opts = {'router:external': True}
api.neutron.network_list(IsA(http.HttpRequest), **search_opts) \
.AndReturn([pool])
self.mox.ReplayAll()
url = reverse('horizon:admin:floating_ips:allocate')
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/floating_ips/allocate.html')
allocate_form = res.context['form']
pool_choices = allocate_form.fields['pool'].choices
self.assertEqual(len(pool_choices), 1)
tenant_choices = allocate_form.fields['tenant'].choices
self.assertEqual(len(tenant_choices), 3)
@test.create_stubs({api.neutron: ('network_list',),
api.keystone: ('tenant_list',)})
def test_admin_allocate_post_invalid_ip_version(self):
tenant = self.tenants.first()
pool = self.networks.first()
tenants = self.tenants.list()
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
search_opts = {'router:external': True}
api.neutron.network_list(IsA(http.HttpRequest), **search_opts) \
.AndReturn([pool])
self.mox.ReplayAll()
form_data = {'pool': pool.id,
'tenant': tenant.id,
'floating_ip_address': 'fc00::1'}
url = reverse('horizon:admin:floating_ips:allocate')
res = self.client.post(url, form_data)
self.assertContains(res, "Invalid version for IP address")
@test.create_stubs({api.neutron: ('tenant_floating_ip_allocate',
'network_list', 'subnet_get'),
api.keystone: ('tenant_list',)})
def test_admin_allocate_post(self):
tenant = self.tenants.first()
floating_ip = self.floating_ips.first()
subnet = self.subnets.first()
pool = self.networks.first()
tenants = self.tenants.list()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
search_opts = {'router:external': True}
api.neutron.network_list(IsA(http.HttpRequest), **search_opts) \
.AndReturn([pool])
api.neutron.tenant_floating_ip_allocate(
IsA(http.HttpRequest),
pool=pool.id,
tenant_id=tenant.id).AndReturn(floating_ip)
self.mox.ReplayAll()
form_data = {'pool': subnet.id,
'tenant': tenant.id}
url = reverse('horizon:admin:floating_ips:allocate')
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('tenant_floating_ip_list',
'floating_ip_disassociate',
'network_list'),
api.nova: ('server_list', ),
api.keystone: ('tenant_list', )})
def test_admin_disassociate_floatingip(self):
# Use neutron test data
fips = self.floating_ips.list()
floating_ip = self.floating_ips.list()[1]
servers = self.servers.list()
tenants = self.tenants.list()
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(fips)
api.nova.server_list(IsA(http.HttpRequest), search_opts={'all_tenants': True}) \
.AndReturn([servers, False])
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
params = {"router:external": True}
api.neutron.network_list(IsA(http.HttpRequest), **params) \
.AndReturn(self.networks.list())
api.neutron.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id)
self.mox.ReplayAll()
form_data = {
"action":
"floating_ips__disassociate__%s" % floating_ip.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('tenant_floating_ip_list',
'network_list'),
api.nova: ('server_list', ),
api.keystone: ('tenant_list', )})
def test_admin_delete_floatingip(self):
# Use neutron test data
fips = self.floating_ips.list()
floating_ip = self.floating_ips.list()[1]
servers = self.servers.list()
tenants = self.tenants.list()
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(fips)
api.nova.server_list(IsA(http.HttpRequest), search_opts={'all_tenants': True}) \
.AndReturn([servers, False])
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
params = {"router:external": True}
api.neutron.network_list(IsA(http.HttpRequest), **params) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
form_data = {
"action":
"floating_ips__delete__%s" % floating_ip.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('tenant_floating_ip_list',
'network_list'),
api.nova: ('server_list', ),
api.keystone: ('tenant_list', )})
def test_floating_ip_table_actions(self):
# Use neutron test data
fips = self.floating_ips.list()
servers = self.servers.list()
tenants = self.tenants.list()
api.neutron.tenant_floating_ip_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(fips)
api.nova.server_list(IsA(http.HttpRequest), search_opts={'all_tenants': True}) \
.AndReturn([servers, False])
api.keystone.tenant_list(IsA(http.HttpRequest))\
.AndReturn([tenants, False])
params = {"router:external": True}
api.neutron.network_list(IsA(http.HttpRequest), **params) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, INDEX_TEMPLATE)
self.assertIn('floating_ips_table', res.context)
floating_ips_table = res.context['floating_ips_table']
floating_ips = floating_ips_table.data
self.assertEqual(len(floating_ips), 2)
# table actions
self.assertContains(res, 'id="floating_ips__action_allocate"')
self.assertContains(res, 'id="floating_ips__action_release"')
# row actions
self.assertContains(res, 'floating_ips__release__%s' % fips[0].id)
self.assertContains(res, 'floating_ips__release__%s' % fips[1].id)
self.assertContains(res, 'floating_ips__disassociate__%s' % fips[1].id)
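
# Hedged sketch (illustrative only, not an actual test in this suite): every
# test above follows the same mox lifecycle -- declare stubs with
# @test.create_stubs, record the expected API calls, switch from record to
# replay mode with self.mox.ReplayAll(), exercise the view, and let the test
# harness verify the expectations on teardown:
#
#     @test.create_stubs({api.neutron: ('tenant_floating_ip_list',)})
#     def test_example(self):
#         api.neutron.tenant_floating_ip_list(
#             IsA(http.HttpRequest), all_tenants=True).AndReturn([])
#         self.mox.ReplayAll()
#         res = self.client.get(INDEX_URL)
#         self.assertTemplateUsed(res, INDEX_TEMPLATE)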
|
|
# -*- coding: utf-8 -*-
#
# Release Notes documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 27 18:32:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('_ext'))
# Allow Sphinx to find rbintegrations.
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..', '..')))
import rbintegrations
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'beanbag_docutils.sphinx.ext.django_utils',
'beanbag_docutils.sphinx.ext.extlinks',
'beanbag_docutils.sphinx.ext.http_role',
'beanbag_docutils.sphinx.ext.intersphinx_utils',
'beanbag_docutils.sphinx.ext.retina_images',
'extralinks',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Release Notes'
copyright = '2017, Beanbag, Inc.'
author = 'Beanbag, Inc.'
bugtracker_url = 'https://reviewboard.org/bugs/%s'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(i) for i in rbintegrations.__version_info__[:2]])
# The full version, including alpha/beta/rc tags.
release = rbintegrations.get_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = 'Release Notes'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ReleaseNotes.tex', 'Release Notes Documentation',
'Beanbag, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = []
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Check whether reviewboard.org intersphinx lookups should use the local
# server.
if os.getenv('DOCS_USE_LOCAL_RBWEBSITE') == '1':
rbwebsite_url = 'http://localhost:8081'
else:
rbwebsite_url = 'https://www.reviewboard.org'
# Add references for intersphinx and custom roles.
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7', None),
'python2': ('https://docs.python.org/2.7', None),
'python3': ('https://docs.python.org/3', None),
'rb3.0': ('%s/docs/manual/3.0/' % rbwebsite_url, None),
'rb4.0': ('%s/docs/manual/4.0/' % rbwebsite_url, None),
'rb-dev': ('%s/docs/manual/dev/' % rbwebsite_url, None),
}
extlinks = {
'pypi': ('https://pypi.org/project/%s/', ''),
'rbintegration': ('https://www.reviewboard.org/integrations/%s', ''),
}
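# Example usage of the roles configured above (illustrative; both role targets
# are placeholders): in a release notes .rst file, :pypi:`rbintegrations`
# expands to https://pypi.org/project/rbintegrations/ and
# :rbintegration:`slack` expands to
# https://www.reviewboard.org/integrations/slack, following the URL templates
# in `extlinks`.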
|
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import unittest
from lxml import etree, html
from lxml.builder import E
from spyne import ComplexModel, XmlAttribute, Unicode, Array, Integer
from spyne.protocol.cloth import XmlCloth
from spyne.test import FakeContext
from spyne.util.six import BytesIO
class TestModelCloth(unittest.TestCase):
def test_root_html(self):
class SomeObject(ComplexModel):
class Attributes(ComplexModel.Attributes):
html_cloth = html.fromstring("<html><body spyne></body></html>")
assert SomeObject.Attributes._html_cloth is None
assert SomeObject.Attributes._html_root_cloth is not None
def test_html(self):
class SomeObject(ComplexModel):
class Attributes(ComplexModel.Attributes):
html_cloth = html.fromstring('<html><body spyne_id="za"></body></html>')
assert SomeObject.Attributes._html_cloth is not None
assert SomeObject.Attributes._html_root_cloth is None
def test_root_xml(self):
class SomeObject(ComplexModel):
class Attributes(ComplexModel.Attributes):
xml_cloth = etree.fromstring('<html><body spyne=""></body></html>')
assert SomeObject.Attributes._xml_cloth is None
assert SomeObject.Attributes._xml_root_cloth is not None
def test_xml(self):
class SomeObject(ComplexModel):
class Attributes(ComplexModel.Attributes):
xml_cloth = html.fromstring('<html><body spyne_id="za"></body></html>')
assert SomeObject.Attributes._xml_cloth is not None
assert SomeObject.Attributes._xml_root_cloth is None
class TestXmlCloth(unittest.TestCase):
def setUp(self):
self.ctx = FakeContext()
self.stream = BytesIO()
logging.basicConfig(level=logging.DEBUG)
def _run(self, inst, spid=None, cloth=None):
cls = inst.__class__
if cloth is None:
assert spid is not None
cloth = etree.fromstring("""<a><b spyne_id="%s"></b></a>""" % spid)
else:
assert spid is None
with etree.xmlfile(self.stream) as parent:
XmlCloth(cloth=cloth).subserialize(self.ctx, cls, inst, parent)
elt = etree.fromstring(self.stream.getvalue())
print(etree.tostring(elt, pretty_print=True))
return elt
def test_simple_value(self):
class SomeObject(ComplexModel):
s = Unicode
v = 'punk.'
elt = self._run(SomeObject(s=v), spid='s')
assert elt[0].text == v
def test_simple_empty(self):
class SomeObject(ComplexModel):
s = Unicode
elt = self._run(SomeObject(), spid='s')
assert len(elt) == 0
# FIXME: just fix it
def _test_simple_empty_nonoptional(self):
class SomeObject(ComplexModel):
s = Unicode(min_occurs=1)
elt = self._run(SomeObject(), spid='s')
assert elt[0].text is None
# FIXME: just fix it
def _test_simple_empty_nonoptional_clear(self):
class SomeObject(ComplexModel):
s = Unicode(min_occurs=1)
cloth = etree.fromstring("""<a><b spyne_id="s">oi punk!</b></a>""")
elt = self._run(SomeObject(), cloth=cloth)
assert elt[0].text is None
def test_simple_value_xmlattribute(self):
v = 'punk.'
class SomeObject(ComplexModel):
s = XmlAttribute(Unicode(min_occurs=1))
cloth = etree.fromstring("""<a></a>""")
elt = self._run(SomeObject(s=v), cloth=cloth)
assert elt.attrib['s'] == v
def test_array(self):
v = range(3)
class SomeObject(ComplexModel):
s = Array(Integer)
cloth = E.a(
E.b(
E.c(spyne_id="integer"),
spyne_id="s",
)
)
elt = self._run(SomeObject(s=v), cloth=cloth)
assert elt.xpath('//c/text()') == [str(i) for i in v]
def test_array_empty(self):
class SomeObject(ComplexModel):
s = Array(Integer)
elt_str = '<a><b spyne_id="s"><c spyne_id="integer"></c></b></a>'
cloth = etree.fromstring(elt_str)
elt = self._run(SomeObject(), cloth=cloth)
assert elt.xpath('//c') == []
# FIXME: just fix it
def _test_array_empty_nonoptional(self):
class SomeObject(ComplexModel):
s = Array(Integer(min_occurs=1))
elt_str = '<a><b spyne_id="s"><c spyne_id="integer"></c></b></a>'
cloth = etree.fromstring(elt_str)
elt = self._run(SomeObject(), cloth=cloth)
assert elt.xpath('//c') == [cloth[0][0]]
def test_simple_two_tags(self):
class SomeObject(ComplexModel):
s = Unicode
i = Integer
v = SomeObject(s='s', i=5)
cloth = E.a(
E.b1(),
E.b2(
E.c1(spyne_id="s"),
E.c2(),
),
E.e(
E.g1(),
E.g2(spyne_id="i"),
E.g3(),
),
)
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b1'
assert elt[1].tag == 'b2'
assert elt[1][0].tag == 'c1'
assert elt[1][0].text == 's'
assert elt[1][1].tag == 'c2'
assert elt[2].tag == 'e'
assert elt[2][0].tag == 'g1'
assert elt[2][1].tag == 'g2'
assert elt[2][1].text == '5'
assert elt[2][2].tag == 'g3'
def test_sibling_order(self):
class SomeObject(ComplexModel):
s = Unicode
v = SomeObject(s='s')
cloth = E.a(
E.b1(),
E.b2(
E.c0(),
E.c1(),
E.c2(spyne_id="s"),
E.c3(),
E.c4(),
),
)
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b1'
assert elt[1].tag == 'b2'
assert elt[1][0].tag == 'c0'
assert elt[1][1].tag == 'c1'
assert elt[1][2].tag == 'c2'
assert elt[1][2].text == 's'
assert elt[1][3].tag == 'c3'
assert elt[1][4].tag == 'c4'
def test_parent_text(self):
class SomeObject(ComplexModel):
s = Unicode
v = SomeObject(s='s')
cloth = E.a(
"text 0",
E.b1(spyne_id="s"),
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt.tag == 'a'
assert elt.text == 'text 0'
assert elt[0].tag == 'b1'
assert elt[0].text == 's'
def test_anc_text(self):
class SomeObject(ComplexModel):
s = Unicode
v = SomeObject(s='s')
cloth = E.a(
E.b1(
"text 1",
E.c1(spyne_id="s"),
)
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b1'
assert elt[0].text == 'text 1'
assert elt[0][0].tag == 'c1'
assert elt[0][0].text == 's'
def test_prevsibl_tail(self):
class SomeObject(ComplexModel):
s = Unicode
v = SomeObject(s='s')
cloth = E.a(
E.b1(
E.c1(),
"text 2",
E.c2(spyne_id="s"),
)
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b1'
assert elt[0][0].tag == 'c1'
assert elt[0][0].tail == 'text 2'
assert elt[0][1].text == 's'
def test_sibling_tail_close(self):
class SomeObject(ComplexModel):
s = Unicode
v = SomeObject(s='s')
cloth = E.a(
E.b0(spyne_id="s"),
"text 3",
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b0'
assert elt[0].text == 's'
assert elt[0].tail == 'text 3'
def test_sibling_tail_close_sibling(self):
class SomeObject(ComplexModel):
s = Unicode
i = Integer
v = SomeObject(s='s', i=5)
cloth = E.a(
E.b0(spyne_id="s"),
"text 3",
E.b1(spyne_id="i"),
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt[0].tag == 'b0'
assert elt[0].text == 's'
assert elt[0].tail == 'text 3'
def test_sibling_tail_close_anc(self):
class SomeObject(ComplexModel):
s = Unicode
i = Integer
v = SomeObject(s='s', i=5)
cloth = E.a(
E.b0(),
"text 0",
E.b1(
E.c0(spyne_id="s"),
"text 1",
E.c1(),
"text 2",
),
"text 3",
E.b2(
E.c1(spyne_id="i"),
"text 4",
)
)
print(etree.tostring(cloth, pretty_print=True))
elt = self._run(v, cloth=cloth)
print(etree.tostring(elt, pretty_print=True))
assert elt.xpath('/a/b1/c1')[0].tail == 'text 2'
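
# Hedged standalone sketch (not part of the test suite above): rendering an
# object into a hand-written cloth outside of unittest, mirroring _run(). The
# element names here are arbitrary.
def _cloth_demo():
    class Greeting(ComplexModel):
        s = Unicode

    stream = BytesIO()
    cloth = etree.fromstring('<root><span spyne_id="s"></span></root>')
    with etree.xmlfile(stream) as parent:
        XmlCloth(cloth=cloth).subserialize(
            FakeContext(), Greeting, Greeting(s='hi'), parent)
    return stream.getvalue()  # expected: b'<root><span>hi</span></root>'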
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the Lattice class used by word lattice decoders.
"""
import logging
from theanolm.backend import InputError
class NodeNotFoundError(Exception):
pass
class Lattice(object):
"""Word Lattice
A word lattice describes a search space for decoding. The graph is represented
as a list of nodes and links. Each node contains pointers to its incoming
and outgoing links, and each link contains pointers to the nodes at both ends.
"""
class Link(object):
"""A link between two graph nodes.
A link contains pointers to the start and end node. A node that has the
link as an outgoing link can find the next node from ``end_node`` and a
node that has the link as an incoming link can find the previous node
from ``start_node``.
"""
def __init__(self, start_node, end_node, word=None,
ac_logprob=None, lm_logprob=None, transitions=""):
"""Constructs a link.
:type start_node: self.Node
:param start_node: the node that has this link as an outgoing link
:type end_node: self.Node
:param end_node: the node that has this link as an incoming link
:type word: str or int
:param word: the word label on the link
:type ac_logprob: float
:param ac_logprob: acoustic log probability
:type lm_logprob: float
:param lm_logprob: language model log probability
:type transitions: str
:param transitions: transitions for an FST lattice
"""
self.start_node = start_node
self.end_node = end_node
self.word = word
self.ac_logprob = ac_logprob
self.lm_logprob = lm_logprob
self.transitions = transitions
class Node(object):
"""A node in the graph.
Outgoing and incoming links can be used to find the next and previous
nodes in the topology.
"""
def __init__(self, node_id):
"""Constructs a node with no links.
:type node_id: int
:param node_id: the ID that can be used to access the node in the
node list
"""
self.id = node_id
self.out_links = []
self.in_links = []
self.time = None
self.best_logprob = None
self.final = False
def __init__(self):
"""Constructs an empty lattice.
"""
self.nodes = []
self.links = []
self.initial_node = None
self.utterance_id = None
self.lm_scale = None
self.wi_penalty = None
def write_slf(self, output_file):
"""Writes the lattice in SLF format.
:type output_file: file object
:param output_file: a file where to write the output
"""
output_file.write("# Header (generated by TheanoLM)\n")
output_file.write("VERSION=1.1\n")
output_file.write('UTTERANCE="{}"\n'.format(self.utterance_id))
fields = []
if self.lm_scale is not None:
fields.append("lmscale={}".format(self.lm_scale))
if self.wi_penalty is not None:
fields.append("wdpenalty={}".format(self.wi_penalty))
if fields:
output_file.write("\t".join(fields) + "\n")
output_file.write("start={}\n".format(self.initial_node.id))
output_file.write("N={}\tL={}\n"
.format(len(self.nodes), len(self.links)))
output_file.write("# Nodes\n")
for node in self.nodes:
fields = ["I={}".format(node.id)]
if node.time is not None:
fields.append("t={}".format(node.time))
output_file.write("\t".join(fields) + "\n")
output_file.write("# Links\n")
for link_id, link in enumerate(self.links):
fields = ["J={}".format(link_id),
"S={}".format(link.start_node.id),
"E={}".format(link.end_node.id)]
if link.word is None:
fields.append("W=!NULL")
else:
fields.append("W={}".format(link.word))
if link.ac_logprob is not None:
fields.append("a={}".format(link.ac_logprob + 0.0))
if link.lm_logprob is not None:
fields.append("l={}".format(link.lm_logprob + 0.0))
output_file.write("\t".join(fields) + "\n")
def write_kaldi(self, output_file, word_to_id):
"""Writes the lattice in Kaldi CompactLattice format.
:type output_file: file object
:param output_file: a file where to write the output
:type word_to_id: Vocabulary
:param word_to_id: mapping of words to Kaldi IDs
"""
def write_normal_link(link):
word = link.word
if word is None:
word = "<eps>"
output_file.write("{} {} {} {},{},{}\n".format(
link.start_node.id,
link.end_node.id,
word_to_id[word],
-link.lm_logprob + 0.0,
-link.ac_logprob + 0.0,
link.transitions))
def write_final_link(link):
output_file.write("{} {},{},{}\n".format(
link.start_node.id,
-link.lm_logprob + 0.0,
-link.ac_logprob + 0.0,
link.transitions))
output_file.write("{}\n".format(self.utterance_id))
for node in self.nodes:
for link in node.out_links:
if link.end_node.final:
write_final_link(link)
else:
write_normal_link(link)
output_file.write("\n")
def sorted_nodes(self):
"""Sorts nodes topologically, then by time.
Returns a list that contains the nodes in sorted order. Uses Kahn's
algorithm to sort the nodes topologically, but always picks the node
with the lowest time stamp from the queue, if the nodes contain time
stamps.
"""
result = []
# A queue of nodes to be visited next:
node_queue = [self.initial_node]
# The number of incoming links not traversed yet:
in_degrees = [len(node.in_links) for node in self.nodes]
while node_queue:
node = node_queue.pop()
result.append(node)
for link in node.out_links:
next_node = link.end_node
in_degrees[next_node.id] -= 1
if in_degrees[next_node.id] == 0:
node_queue.append(next_node)
node_queue.sort(key=lambda x: (x.time is None, x.time),
reverse=True)
elif in_degrees[next_node.id] < 0:
raise InputError("Word lattice contains a cycle.")
if len(result) < len(self.nodes):
logging.warning("Word lattice contains unreachable nodes.")
else:
assert len(result) == len(self.nodes)
return result
def _add_link(self, start_node, end_node):
"""Adds a link between two nodes.
:type start_node: Node
:param start_node: creates a link from this node
:type end_node: Node
:param end_node: creates a link to this node
:rtype: Link
:returns: the created link
"""
link = self.Link(start_node, end_node)
self.links.append(link)
start_node.out_links.append(link)
end_node.in_links.append(link)
return link
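
# Hedged usage sketch (not part of the original module): build a two-word
# lattice by hand with the classes above, check the topological order, and
# dump it in SLF format. The utterance id and word labels are arbitrary.
def _lattice_demo(output_file):
    lattice = Lattice()
    lattice.utterance_id = 'utt1'
    lattice.nodes = [Lattice.Node(i) for i in range(3)]
    lattice.initial_node = lattice.nodes[0]
    lattice.nodes[2].final = True
    lattice._add_link(lattice.nodes[0], lattice.nodes[1]).word = 'hello'
    lattice._add_link(lattice.nodes[1], lattice.nodes[2]).word = 'world'
    assert [node.id for node in lattice.sorted_nodes()] == [0, 1, 2]
    lattice.write_slf(output_file)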
|
|
FBIOGET_VSCREENINFO=0x4600
FBIOPUT_VSCREENINFO=0x4601
FBIOGET_FSCREENINFO=0x4602
FBIOGETCMAP=0x4604
FBIOPUTCMAP=0x4605
FBIOPAN_DISPLAY=0x4606
#FBIO_CURSOR IOWR('F', 0x08, struct fb_cursor)
FBIOGET_CON2FBMAP=0x460F
FBIOPUT_CON2FBMAP=0x4610
FBIOBLANK=0x4611
#FBIOGET_VBLANK=IOR('F',0x12, struct fb_vblank)
FBIO_ALLOC=0x4613
FBIO_FREE=0x4614
FBIOGET_GLYPH=0x4615
FBIOGET_HWCINFO=0x4616
FBIOPUT_MODEINFO=0x4617
FBIOGET_DISPINFO=0x4618
#FBIO_WAITFORVSYNC=IOW('F', 0x20, u32)
FB_TYPE_PACKED_PIXELS=0
FB_TYPE_PLANES=1
FB_TYPE_INTERLEAVED_PLANES=2
FB_TYPE_TEXT=3
FB_TYPE_VGA_PLANES=4
FB_TYPE_FOURCC=5
FB_AUX_TEXT_MDA=0
FB_AUX_TEXT_CGA=1
FB_AUX_TEXT_S3_MMIO=2
FB_AUX_TEXT_MGA_STEP16=3
FB_AUX_TEXT_MGA_STEP8=4
FB_AUX_TEXT_SVGA_GROUP=8
FB_AUX_TEXT_SVGA_MASK=7
FB_AUX_TEXT_SVGA_STEP2=8
FB_AUX_TEXT_SVGA_STEP4=9
FB_AUX_TEXT_SVGA_STEP8=10
FB_AUX_TEXT_SVGA_STEP16=11
FB_AUX_TEXT_SVGA_LAST=15
FB_AUX_VGA_PLANES_VGA4=0
FB_AUX_VGA_PLANES_CFB4=1
FB_AUX_VGA_PLANES_CFB8=2
FB_VISUAL_MONO01=0
FB_VISUAL_MONO10=1
FB_VISUAL_TRUECOLOR=2
FB_VISUAL_PSEUDOCOLOR=3
FB_VISUAL_DIRECTCOLOR=4
FB_VISUAL_STATIC_PSEUDOCOLOR=5
FB_VISUAL_FOURCC=6
FB_ACCEL_NONE=0
FB_ACCEL_ATARIBLITT=1
FB_ACCEL_AMIGABLITT=2
FB_ACCEL_S3_TRIO64=3
FB_ACCEL_NCR_77C32BLT=4
FB_ACCEL_S3_VIRGE=5
FB_ACCEL_ATI_MACH64GX=6
FB_ACCEL_DEC_TGA=7
FB_ACCEL_ATI_MACH64CT=8
FB_ACCEL_ATI_MACH64VT=9
FB_ACCEL_ATI_MACH64GT=10
FB_ACCEL_SUN_CREATOR=11
FB_ACCEL_SUN_CGSIX=12
FB_ACCEL_SUN_LEO=13
FB_ACCEL_IMS_TWINTURBO=14
FB_ACCEL_3DLABS_PERMEDIA2=15
FB_ACCEL_MATROX_MGA2064W=16
FB_ACCEL_MATROX_MGA1064SG=17
FB_ACCEL_MATROX_MGA2164W=18
FB_ACCEL_MATROX_MGA2164W_AGP=19
FB_ACCEL_MATROX_MGAG100=20
FB_ACCEL_MATROX_MGAG200=21
FB_ACCEL_SUN_CG14=22
FB_ACCEL_SUN_BWTWO=23
FB_ACCEL_SUN_CGTHREE=24
FB_ACCEL_SUN_TCX=25
FB_ACCEL_MATROX_MGAG400=26
FB_ACCEL_NV3=27
FB_ACCEL_NV4=28
FB_ACCEL_NV5=29
FB_ACCEL_CT_6555x=30
FB_ACCEL_3DFX_BANSHEE=31
FB_ACCEL_ATI_RAGE128=32
FB_ACCEL_IGS_CYBER2000=33
FB_ACCEL_IGS_CYBER2010=34
FB_ACCEL_IGS_CYBER5000=35
FB_ACCEL_SIS_GLAMOUR=36
FB_ACCEL_3DLABS_PERMEDIA3=37
FB_ACCEL_ATI_RADEON=38
FB_ACCEL_I810=39
FB_ACCEL_SIS_GLAMOUR_2=40
FB_ACCEL_SIS_XABRE=41
FB_ACCEL_I830=42
FB_ACCEL_NV_10=43
FB_ACCEL_NV_20=44
FB_ACCEL_NV_30=45
FB_ACCEL_NV_40=46
FB_ACCEL_XGI_VOLARI_V=47
FB_ACCEL_XGI_VOLARI_Z=48
FB_ACCEL_OMAP1610=49
FB_ACCEL_TRIDENT_TGUI=50
FB_ACCEL_TRIDENT_3DIMAGE=51
FB_ACCEL_TRIDENT_BLADE3D=52
FB_ACCEL_TRIDENT_BLADEXP=53
FB_ACCEL_CIRRUS_ALPINE=53
FB_ACCEL_NEOMAGIC_NM2070=90
FB_ACCEL_NEOMAGIC_NM2090=91
FB_ACCEL_NEOMAGIC_NM2093=92
FB_ACCEL_NEOMAGIC_NM2097=93
FB_ACCEL_NEOMAGIC_NM2160=94
FB_ACCEL_NEOMAGIC_NM2200=95
FB_ACCEL_NEOMAGIC_NM2230=96
FB_ACCEL_NEOMAGIC_NM2360=97
FB_ACCEL_NEOMAGIC_NM2380=98
FB_ACCEL_PXA3XX=99
FB_ACCEL_SAVAGE4=0x80
FB_ACCEL_SAVAGE3D=0x81
FB_ACCEL_SAVAGE3D_MV=0x82
FB_ACCEL_SAVAGE2000=0x83
FB_ACCEL_SAVAGE_MX_MV=0x84
FB_ACCEL_SAVAGE_MX=0x85
FB_ACCEL_SAVAGE_IX_MV=0x86
FB_ACCEL_SAVAGE_IX=0x87
FB_ACCEL_PROSAVAGE_PM=0x88
FB_ACCEL_PROSAVAGE_KM=0x89
FB_ACCEL_S3TWISTER_P=0x8a
FB_ACCEL_S3TWISTER_K=0x8b
FB_ACCEL_SUPERSAVAGE=0x8c
FB_ACCEL_PROSAVAGE_DDR=0x8d
FB_ACCEL_PROSAVAGE_DDRK=0x8e
FB_ACCEL_PUV3_UNIGFX=0xa0
from PIL.Image import ANTIALIAS
from mmap import mmap
from fcntl import ioctl
import struct
mm = None
bpp, w, h = 0, 0, 0 # framebuffer bpp and size
bytepp = 0
vx, vy, vw, vh = 0, 0, 0, 0 #virtual window offset and size
vi, fi = None, None
_fb_cmap = 'IIPPPP' # start, len, r, g, b, a
RGB = False
_verbose = False
msize_kb = 0
def report_fb(i=0, layer=0):
'''Reads and returns the variable and fixed screen info of /dev/fb<i>.'''
with open('/dev/fb'+str(i), 'r+b') as f:
vi = ioctl(f, FBIOGET_VSCREENINFO, bytes(160))
vi = list(struct.unpack('I'*40, vi))
ffm = 'c'*16+'L'+'I'*4+'H'*3+'ILIIHHH'
fic = struct.calcsize(ffm)
fi = struct.unpack(ffm, ioctl(f, FBIOGET_FSCREENINFO, bytes(fic)))
return vi, fi
def ready_fb(_bpp=None, i=0, layer=0, _win=None):
global mm, bpp, w, h, vi, fi, RGB, msize_kb, vx, vy, vw, vh, bytepp
if mm and bpp == _bpp: return mm, w, h, bpp
with open('/dev/fb'+str(i), 'r+b') as f:
vi = ioctl(f, FBIOGET_VSCREENINFO, bytes(160))
vi = list(struct.unpack('I'*40, vi))
#(1920, 1080, 1920, 1080, 0, 0, 24, 0,
# w h vw vh xo yo bpp col
# virtual size offset 1=gray
# 16, 8, 0, 8, 8, 0, 0, 8, 0, 24, 0, 0, 0, 0, 4294967295, 4294967295, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# (bit offset, bits, bigend) non acv height(mm) width(mm) accl, pixclock, ..... angle, colorspace, reserved[4]
# R G B A std
bpp = vi[6]
bytepp = bpp//8
if _bpp:
vi[6] = _bpp # 24 bit = BGR 888 mode
#vi[8] = 0
#vi[14] = 16
try:
vi = ioctl(f, FBIOPUT_VSCREENINFO, struct.pack('I'*40, *vi)) # fb_var_screeninfo
vi = struct.unpack('I'*40,vi)
bpp = vi[6]
bytepp = bpp//8
except:
pass
if vi[8] == 0 : RGB = True
#r_o, r_b, r_e = vi[8:11]
#g_o, g_b, g_e = vi[11:14]
#b_o, b_b, b_e = vi[14:17]
ffm = 'c'*16+'L'+'I'*4+'H'*3+'ILIIHHH'
fic = struct.calcsize(ffm)
fi = struct.unpack(ffm, ioctl(f, FBIOGET_FSCREENINFO, bytes(fic)))
#(b'B', b'C', b'M', b'2', b'7', b'0', b'8', b' ', b'F', b'B', b'\x00', b'\x00', b'\x00', b'\x00', b'\x00', b'\x00',
# 16 char =id
# 1025519616, 6220800, 0, 0, 2, 1, 1, 0, 5760, 0, 0, 0, 0, 0, 0)
# smem_len type type_aux, visual, xpanstep, ypanstep, ywrapstep, line_length, mmio_start, mmio_len, accel, capabilities, reserved[2]
msize = fi[17] # = w*h*bpp//8
ll, start = fi[-7:-5]
# bpp = vi[9]+vi[12]+vi[15]+vi[18]
w, h = ll//bytepp, vi[1] # when the screen is rotated the reported width can be wrong; line_length//bytepp is more reliable
if _win and len(_win)==4: # virtual window settings
vx, vy, vw, vh = _win
if vw == 'w': vw = w
if vh == 'h': vh = h
vx, vy, vw, vh = map(int, (vx, vy, vw, vh))
if vx>=w: vx = 0
if vy>=h: vy = 0
# vw/vh arrive as right/bottom coordinates; convert to sizes clipped to screen
if vw>w: vw = w - vx
else: vw -= vx
if vh>h: vh = h - vy
else: vh -= vy
else:
vx, vy, vw, vh = 0,0,w,h
#msize_kb = w*h*bpp//8//1024 # more accurate FB memory size in kb
msize_kb = vw*vh*bytepp//1024 # more accurate FB memory size in kb
#xo, yo = vi[4], vi[5]
mm = mmap(f.fileno(), msize, offset=start)
return mm, w, h, bpp#ll//(bpp//8), h
def magick(fpath):
''' Use ImageMagick to convert to BGR '''
from subprocess import check_output, run, PIPE
try:
if b'ImageMagick' not in check_output('convert'):#, stdout=PIPE).stdout:
return
except FileNotFoundError:
return
p = run(['convert', '-verbose', '-coalesce', '-resize', "%dx%d"%(vw,vh), fpath, ('bgr' if bpp < 32 else 'bgra')+':-'], stdout=PIPE, stderr=PIPE, bufsize=0)
p, m = p.stdout, p.stderr
from re import findall
m = len(findall(rb'gif\[([0-9]+)\]', m)) # may hang on single frame files
if not m : return p
r=[]
s=len(p)//m
for i in range(m):
r.append(p[s*i:s*(i+1)])
return r
def fill_scr(r,g,b):
if bpp == 32:
seed = struct.pack('BBBB', b, g, r, 255)
elif bpp == 24:
seed = struct.pack('BBB', b, g, r)
elif bpp == 16:
seed = struct.pack('H', r>>3<<11 | g>>2<<5 | b>>3)
mm.seek(0)
show_img(seed * vw * vh)
def fill_scr_ani(event=None, delay=1/30):
''' R - G - B transition animation, 30fps delay = 1/30 sec by default '''
from time import sleep
while not event or not event.is_set():
for i in range(256):
if event and event.is_set(): event.clear(); return
fill_scr(i, 255-i, 255) # abc
sleep(delay)
for i in range(256):
if event and event.is_set(): event.clear(); return
fill_scr(255, i, 255-i) # cab
sleep(delay)
for i in range(256):
if event and event.is_set(): event.clear(); return
fill_scr(255-i, 255, i) # bca
sleep(delay)
def black_scr():
fill_scr(0,0,0)
def white_scr():
fill_scr(255,255,255)
def mmseekto(x,y):
mm.seek((x + y*w) * bytepp)
def dot(x, y, r, g, b):
mmseekto(x,y)
mm.write(struct.pack('BBB',*((r,g,b) if RGB else (b,g,r)))) # writes 3 bytes; assumes a 24 bpp layout
def get_pixel(x,y):
mmseekto(x,y)
return mm.read(bytepp)
# GIF should have BGR format data
def ready_img(fpath, resize=True):
from PIL import Image
im = Image.open(fpath)
return im.resize((vw,vh), ANTIALIAS) if resize else im
def _888_to_565(bt):
b = b''
for i in range(0, len(bt),3):
b += int.to_bytes(bt[i]>>3<<11|bt[i+1]>>2<<5|bt[i+2]>>3, 2, 'little')
return b
def numpy_888_565(bt):
import numpy as np
# view each 4-byte BGRA pixel as a little-endian uint32 and repack to RGB565
arr = np.frombuffer(bt, dtype=np.uint32)
return (((0xF80000 & arr)>>8)|((0xFC00 & arr)>>5)|((0xF8 & arr)>>3)).astype(np.uint16).tobytes()
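
# Hedged sanity check for the two 565 packers above (added for illustration).
# Note the differing input conventions: numpy_888_565() consumes 4-byte BGRA
# pixels, while _888_to_565() consumes 3-byte triplets with the red-like
# field first.
def _selftest_565():
    red565 = (0xF800).to_bytes(2, 'little')
    # pure red as a single BGRA pixel: B=0, G=0, R=0xFF, A=0
    assert numpy_888_565(b'\x00\x00\xff\x00') == red565
    # pure red as a single 3-byte triplet, high field first
    assert _888_to_565(b'\xff\x00\x00') == red565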
def show_img(img):
if not type(img) is bytes:
if not RGB:
if bpp == 24: # for RPI
img = img.tobytes('raw', 'BGR')
else:
img = img.convert('RGBA').tobytes('raw', 'BGRA')
if bpp == 16:
img = numpy_888_565(img)
#img = img.tobytes('raw', 'BGR')
#img = _888_to_565(img)
#from io import BytesIO
#bt = BytesIO(img)
#for y in range(vh):
# mmseekto(vx,vy+y)
# for x in range(vw):
# b,g,r = [ord(bt.read(1)) for i in range(3)]
# mm.write(int.to_bytes(b>>3 | g>>2<<5 | r>>3<<11, 2, 'little'))
#return
else:
if bpp == 24:
img = img.tobytes()
else:
img = img.convert('RGBA').tobytes()
if bpp == 16:
img = numpy_888_565(img)
from io import BytesIO
b = BytesIO(img)
s = vw*bytepp
for y in range(vh): # virtual window drawing
mmseekto(vx,vy+y)
mm.write(b.read(s))
def _ready_gif(cut):
dur = 1
if cut.info.get('duration'):
dur = cut.info['duration']/1000
cut = cut.convert('RGBA' if bpp == 32 else 'RGB').resize((vw,vh), ANTIALIAS)
if not RGB:
return cut, dur
return cut.tobytes(), dur
def ready_gif(gif, preview=False):
from PIL import ImageSequence
#from multiprocessing import Pool
imgs = []
fm = ''
for l in open('/proc/meminfo'):
if l.startswith('MemFree:'):
fm = int(l.split()[1])
break
frame_limit = fm // msize_kb
for img in ImageSequence.Iterator(gif):
imgs.append(_ready_gif(img))#.copy())
if preview:
preview = False
show_img(imgs[0][0])
if len(imgs) >= frame_limit:
if _verbose:
#print('This file is too big to play. Limited to play only %d frames in total %d frames.' % (frame_limit, gif.n_frames))
print('This file is too big to play. Limited to play only %d frames.' % frame_limit )
break
#with Pool(4) as p:
#imgs=list(p.map(_ready_gif, imgs))
return imgs
def gif_loop(gif, event=None, force_loop=False, preview=False):
from threading import Thread, Event, Timer
from itertools import cycle
imgs = ready_gif(gif, preview)
if event is None : event = Event()
for i in range(force_loop if type(force_loop) is int else 1):
for img, dur in cycle(imgs) if force_loop is True else imgs:
if event and event.is_set():
return
Timer(dur, lambda e:event.set(), [event]).start()
show_img(img)
event.wait() # wait for animation frame duration
event.clear()
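
# Hedged usage sketch (not in the original file; 'anim.gif' is a placeholder
# path): map the framebuffer once, then loop a GIF until another thread sets
# the stop event.
def _gif_demo(path='anim.gif'):
    from threading import Event
    from PIL import Image
    ready_fb()  # mmap /dev/fb0 at its current bpp
    stop = Event()
    gif_loop(Image.open(path), event=stop, force_loop=True)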
if __name__ == '__main__':
print('This is a pure Python library file. If you want to use as stand-alone, use \'main.py\' instead.')
exit(1)
|
|
import unittest
import colander
from cornice_swagger.converters import convert_schema as convert
from cornice_swagger.converters import TypeConversionDispatcher
from cornice_swagger.converters.exceptions import NoSuchConverter
from ..support import AnyType, AnyTypeConverter
class ConversionTest(unittest.TestCase):
def test_validate_all(self):
node = colander.SchemaNode(colander.String(),
validator=colander.All(
colander.Length(12, 42),
colander.Regex(r'foo*bar')
))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'pattern': 'foo*bar',
'maxLength': 42,
'minLength': 12,
})
def test_support_custom_converters(self):
node = colander.SchemaNode(AnyType())
custom_converters = {AnyType: AnyTypeConverter}
converter = TypeConversionDispatcher(custom_converters)
ret = converter(node)
self.assertEqual(ret, {})
def test_support_default_converter(self):
node = colander.SchemaNode(AnyType())
converter = TypeConversionDispatcher(default_converter=AnyTypeConverter)
ret = converter(node)
self.assertEqual(ret, {})
def test_raise_no_such_converter_on_invalid_type(self):
node = colander.SchemaNode(dict)
self.assertRaises(NoSuchConverter, convert, node)
class StringConversionTest(unittest.TestCase):
def test_sanity(self):
node = colander.SchemaNode(colander.String())
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
})
def test_validate_default(self):
node = colander.SchemaNode(colander.String(), missing='foo')
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'default': 'foo',
})
def test_validate_length(self):
node = colander.SchemaNode(colander.String(),
validator=colander.Length(12, 42))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'maxLength': 42,
'minLength': 12,
})
def test_validate_regex(self):
node = colander.SchemaNode(colander.String(),
validator=colander.Regex(r'foo*bar'))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'pattern': 'foo*bar',
})
def test_validate_regex_email(self):
node = colander.SchemaNode(colander.String(),
validator=colander.Email())
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'format': 'email',
})
def test_validate_regex_url(self):
node = colander.SchemaNode(colander.String(),
validator=colander.url)
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'format': 'url',
})
def test_validate_oneof(self):
node = colander.SchemaNode(colander.String(),
validator=colander.OneOf(["one", "two"]))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'enum': ['one', 'two'],
})
def test_title(self):
node = colander.SchemaNode(colander.String(), title='foo')
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'title': 'foo',
})
def test_description(self):
node = colander.SchemaNode(colander.String(),
description='bar')
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'description': 'bar',
})
class IntegerConversionTest(unittest.TestCase):
def test_sanity(self):
node = colander.SchemaNode(colander.Integer())
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'integer',
})
def test_default(self):
node = colander.SchemaNode(colander.Integer(), missing=1)
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'integer',
'default': 1,
})
def test_enum(self):
node = colander.SchemaNode(colander.Integer(),
validator=colander.OneOf([1, 2, 3, 4]))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'integer',
'enum': [1, 2, 3, 4],
})
def test_range(self):
node = colander.SchemaNode(colander.Integer(),
validator=colander.Range(111, 555))
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'integer',
'minimum': 111,
'maximum': 555,
})
class DateTimeConversionTest(unittest.TestCase):
def test_sanity(self):
node = colander.SchemaNode(colander.DateTime())
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'string',
'format': 'date-time',
})
class MappingConversionTest(unittest.TestCase):
def test_sanity(self):
node = colander.MappingSchema()
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'object',
})
def test_required(self):
class Mapping(colander.MappingSchema):
foo = colander.SchemaNode(colander.String())
node = Mapping()
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'object',
'properties': {
'foo': {
'title': 'Foo',
'type': 'string'
}
},
'required': ['foo']
})
def test_not_required(self):
class Mapping(colander.MappingSchema):
foo = colander.SchemaNode(colander.String(),
missing=colander.drop)
node = Mapping()
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'object',
'properties': {
'foo': {
'title': 'Foo',
'type': 'string'
}
},
})
def test_nested_schema(self):
class BaseMapping(colander.MappingSchema):
foo = colander.SchemaNode(colander.String(),
missing=colander.drop)
class TopMapping(colander.MappingSchema):
bar = BaseMapping(missing=colander.drop)
node = TopMapping()
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'object',
'properties': {
'bar': {
'title': 'Bar',
'type': 'object',
'properties': {
'foo': {
'title': 'Foo',
'type': 'string'
}
}
}
},
})
def test_open_schema(self):
class Mapping(colander.MappingSchema):
foo = colander.SchemaNode(colander.String(),
missing=colander.drop)
@staticmethod
def schema_type():
return colander.Mapping(unknown='preserve')
node = Mapping()
ret = convert(node)
self.assertDictEqual(ret, {
'type': 'object',
'properties': {
'foo': {
'title': 'Foo',
'type': 'string'
}
},
'additionalProperties': {}
})
class SequenceConversionTest(unittest.TestCase):
def primitive_sequence_test(self):
class Integers(colander.SequenceSchema):
num = colander.SchemaNode(colander.Integer())
ret = convert(Integers)
self.assertDictEqual(ret, {
'type': 'array',
'items': {
'type': 'integer',
},
})
def mapping_sequence_test(self):
class BaseMapping(colander.MappingSchema):
name = colander.SchemaNode(colander.String())
number = colander.SchemaNode(colander.Integer())
class BaseMappings(colander.SequenceSchema):
base_mapping = BaseMapping()
schema = BaseMappings()
ret = convert(schema)
self.assertDictEqual(ret, {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {
'type': 'string',
'title': 'Name',
},
'number': {
'type': 'integer',
'title': 'Number',
}
},
'required': ['name', 'number'],
'title': 'Base Mapping',
},
})
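
# Illustrative note (not a test in this suite): convert() merges type, default
# and validator information into one Swagger fragment. For example, a node
# like
#
#     colander.SchemaNode(colander.Integer(), missing=0,
#                         validator=colander.Range(0, 10))
#
# is expected to convert to
#
#     {'type': 'integer', 'default': 0, 'minimum': 0, 'maximum': 10}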
|
|
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine.resources.openstack.neutron import neutron
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
class HealthMonitor(neutron.NeutronResource):
"""A resource to handle load balancer health monitors.
This resource creates and manages Neutron LBaaS v2 healthmonitors,
which watch the status of the load-balanced servers.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
# Properties inputs for the resources create/update.
PROPERTIES = (
ADMIN_STATE_UP, DELAY, EXPECTED_CODES, HTTP_METHOD,
MAX_RETRIES, POOL, TIMEOUT, TYPE, URL_PATH, TENANT_ID
) = (
'admin_state_up', 'delay', 'expected_codes', 'http_method',
'max_retries', 'pool', 'timeout', 'type', 'url_path', 'tenant_id'
)
# Supported HTTP methods
HTTP_METHODS = (
GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS,
CONNECT, PATCH
) = (
'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS',
'CONNECT', 'PATCH'
)
# Supported output attributes of the resources.
ATTRIBUTES = (POOLS_ATTR,) = ('pools',)
properties_schema = {
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the health monitor.'),
default=True,
update_allowed=True,
constraints=[constraints.AllowedValues(['True'])]
),
DELAY: properties.Schema(
properties.Schema.INTEGER,
_('The minimum time in seconds between regular connections of '
'the member.'),
required=True,
update_allowed=True,
constraints=[constraints.Range(min=0, max=2147483647)]
),
EXPECTED_CODES: properties.Schema(
properties.Schema.STRING,
_('The HTTP status codes expected in response from the '
'member to declare it healthy. Specify one of the following '
'values: a single value, such as 200; a list, such as 200, 202; '
'or a range, such as 200-204.'),
update_allowed=True,
default='200'
),
HTTP_METHOD: properties.Schema(
properties.Schema.STRING,
_('The HTTP method used for requests by the monitor of type '
'HTTP.'),
update_allowed=True,
default=GET,
constraints=[constraints.AllowedValues(HTTP_METHODS)]
),
MAX_RETRIES: properties.Schema(
properties.Schema.INTEGER,
_('Number of permissible connection failures before changing the '
'member status to INACTIVE.'),
required=True,
update_allowed=True,
constraints=[constraints.Range(min=1, max=10)],
),
POOL: properties.Schema(
properties.Schema.STRING,
_('ID or name of the load balancing pool.'),
required=True
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
required=True,
update_allowed=True,
constraints=[constraints.Range(min=0, max=2147483647)]
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('One of the predefined health monitor types.'),
required=True,
constraints=[
constraints.AllowedValues(['PING', 'TCP', 'HTTP']),
]
),
URL_PATH: properties.Schema(
properties.Schema.STRING,
_('The HTTP path used in the HTTP request by the monitor to '
'test member health. A valid value is a string that begins '
'with a forward slash (/).'),
update_allowed=True,
default='/'
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the tenant who owns the health monitor.')
)
}
attributes_schema = {
POOLS_ATTR: attributes.Schema(
_('The list of Pools related to this monitor.'),
type=attributes.Schema.LIST
)
}
def __init__(self, name, definition, stack):
super(HealthMonitor, self).__init__(name, definition, stack)
self._lb_id = None
@property
def lb_id(self):
if self._lb_id is None:
pool_id = self.client_plugin().find_resourceid_by_name_or_id(
self.POOL,
self.properties[self.POOL],
cmd_resource='lbaas_pool')
pool = self.client().show_lbaas_pool(pool_id)['pool']
listener_id = pool['listeners'][0]['id']
listener = self.client().show_listener(listener_id)['listener']
self._lb_id = listener['loadbalancers'][0]['id']
return self._lb_id
def _check_lb_status(self):
return self.client_plugin().check_lb_status(self.lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_pool(
properties, self.POOL, 'pool_id')
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
healthmonitor = self.client().create_lbaas_healthmonitor(
{'healthmonitor': properties})['healthmonitor']
self.resource_id_set(healthmonitor['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
return self.client().show_lbaas_healthmonitor(
self.resource_id)['healthmonitor']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_lbaas_healthmonitor(
self.resource_id, {'healthmonitor': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_lbaas_healthmonitor(self.resource_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::HealthMonitor': HealthMonitor,
}
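# Hedged usage sketch (not part of this module): a minimal HOT template
# fragment, embedded here as a Python string, that would exercise the
# HealthMonitor resource above. Values are placeholders; only the required
# properties (delay, max_retries, timeout, type, pool) are set.
EXAMPLE_TEMPLATE = '''
heat_template_version: 2016-04-08
resources:
  monitor:
    type: OS::Neutron::LBaaS::HealthMonitor
    properties:
      delay: 3
      max_retries: 3
      timeout: 3
      type: HTTP
      pool: {get_resource: pool}
'''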
|
|
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Server Group API Extension."""
from oslo_log import log as logging
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.validation import parameter_types
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova import utils
LOG = logging.getLogger(__name__)
SUPPORTED_POLICIES = ['anti-affinity', 'affinity']
authorize = extensions.extension_authorizer('compute', 'server_groups')
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
class ServerGroupController(wsgi.Controller):
"""The Server group API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.ext_mgr = ext_mgr
def _format_server_group(self, context, group):
# The 'id' field holds the uuid of the server group; clients never
# see a separate 'uuid' key. Clients see policies as a
# ["policy-name"] list and members as a ["server-id"] list.
server_group = {}
server_group['id'] = group.uuid
server_group['name'] = group.name
server_group['policies'] = group.policies or []
# NOTE(danms): This has been exposed to the user, but never used.
# Since we can't remove it, just make sure it's always empty.
server_group['metadata'] = {}
members = []
if group.members:
# Display the instances that are not deleted.
filters = {'uuid': group.members, 'deleted': False}
instances = objects.InstanceList.get_by_filters(
context, filters=filters)
members = [instance.uuid for instance in instances]
server_group['members'] = members
return server_group
def _validate_policies(self, policies):
"""Validate the policies.
Validates that there are no contradicting policies, for example
'anti-affinity' and 'affinity' in the same group.
Validates that the defined policies are supported.
:param policies: the given policies of the server_group
"""
if ('anti-affinity' in policies and
'affinity' in policies):
msg = _("Conflicting policies configured!")
raise nova.exception.InvalidInput(reason=msg)
not_supported = [policy for policy in policies
if policy not in SUPPORTED_POLICIES]
if not_supported:
msg = _("Invalid policies: %s") % ', '.join(not_supported)
raise nova.exception.InvalidInput(reason=msg)
# Note(wingwj): It doesn't make sense to store duplicate policies.
if sorted(set(policies)) != sorted(policies):
msg = _("Duplicate policies configured!")
raise nova.exception.InvalidInput(reason=msg)
def _validate_input_body(self, body, entity_name):
if not self.is_valid_body(body, entity_name):
msg = _("the body is invalid.")
raise nova.exception.InvalidInput(reason=msg)
subbody = dict(body[entity_name])
expected_fields = ['name', 'policies']
for field in expected_fields:
value = subbody.pop(field, None)
if not value:
msg = _("'%s' is either missing or empty.") % field
raise nova.exception.InvalidInput(reason=msg)
if field == 'name':
utils.check_string_length(value, field,
min_length=1, max_length=255)
if not parameter_types.valid_name_regex_obj.search(value):
msg = _("Invalid format for name: '%s'") % value
raise nova.exception.InvalidInput(reason=msg)
elif field == 'policies':
if isinstance(value, list):
[utils.check_string_length(v, field,
min_length=1, max_length=255) for v in value]
self._validate_policies(value)
else:
msg = _("'%s' is not a list") % value
raise nova.exception.InvalidInput(reason=msg)
if subbody:
msg = _("unsupported fields: %s") % subbody.keys()
raise nova.exception.InvalidInput(reason=msg)
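# Hedged illustration: a body accepted by _validate_input_body looks like
#     {'server_group': {'name': 'grp-1', 'policies': ['anti-affinity']}}
# while an empty name, duplicate or conflicting policies, or extra keys
# all raise InvalidInput.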
def show(self, req, id):
"""Return data about the given server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'server_group': self._format_server_group(context, sg)}
def delete(self, req, id):
"""Delete an server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
quotas = None
if self.ext_mgr.is_loaded('os-server-group-quotas'):
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_server_group(context,
sg)
try:
# We have to add the quota back to the user that created
# the server group
quotas.reserve(project_id=project_id,
user_id=user_id, server_groups=-1)
except Exception:
quotas = None
LOG.exception(_LE("Failed to update usages deallocating "
"server group"))
try:
sg.destroy()
except nova.exception.InstanceGroupNotFound as e:
if quotas:
quotas.rollback()
raise webob.exc.HTTPNotFound(explanation=e.format_message())
if quotas:
quotas.commit()
return webob.Response(status_int=204)
def index(self, req):
"""Returns a list of server groups."""
context = _authorize_context(req)
project_id = context.project_id
if 'all_projects' in req.GET and context.is_admin:
sgs = objects.InstanceGroupList.get_all(context)
else:
sgs = objects.InstanceGroupList.get_by_project_id(
context, project_id)
limited_list = common.limited(sgs.objects, req)
result = [self._format_server_group(context, group)
for group in limited_list]
return {'server_groups': result}
def create(self, req, body):
"""Creates a new server group."""
context = _authorize_context(req)
try:
self._validate_input_body(body, 'server_group')
except nova.exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
quotas = None
if self.ext_mgr.is_loaded('os-server-group-quotas'):
quotas = objects.Quotas(context=context)
try:
quotas.reserve(project_id=context.project_id,
user_id=context.user_id, server_groups=1)
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
vals = body['server_group']
sg = objects.InstanceGroup(context)
sg.project_id = context.project_id
sg.user_id = context.user_id
try:
sg.name = vals.get('name')
sg.policies = vals.get('policies')
sg.create()
except ValueError as e:
if quotas:
quotas.rollback()
raise exc.HTTPBadRequest(explanation=e)
if quotas:
quotas.commit()
return {'server_group': self._format_server_group(context, sg)}
class Server_groups(extensions.ExtensionDescriptor):
"""Server group support."""
name = "ServerGroups"
alias = "os-server-groups"
namespace = ("http://docs.openstack.org/compute/ext/"
"servergroups/api/v2")
updated = "2013-06-20T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-server-groups',
controller=ServerGroupController(self.ext_mgr),
member_actions={"action": "POST", })
resources.append(res)
return resources
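# Hedged client-side sketch (not part of Nova): creating a server group via
# the extension above with python-requests. The endpoint and token are
# placeholders; the body matches what _validate_input_body expects.
import requests

def create_server_group(endpoint, token, name, policy):
    body = {'server_group': {'name': name, 'policies': [policy]}}
    resp = requests.post(endpoint + '/os-server-groups',
                         json=body, headers={'X-Auth-Token': token})
    resp.raise_for_status()
    return resp.json()['server_group']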
|
|
# -*- test-case-name: go.vumitools.tests.test_api -*-
# -*- coding: utf-8 -*-
"""Convenience API, mostly for working with various datastores."""
from uuid import uuid4
from collections import defaultdict
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.blinkenlights.metrics import MetricManager
from vumi.errors import VumiError
from vumi.message import Message
from vumi.components.tagpool import TagpoolManager
from vumi.components.message_store import MessageStore
from vumi.persist.model import Manager
from vumi.persist.riak_manager import RiakManager
from vumi.persist.txriak_manager import TxRiakManager
from vumi.persist.redis_manager import RedisManager
from vumi.persist.txredis_manager import TxRedisManager
from vumi.service import Publisher
from vumi import log
from go.config import configured_conversations, configured_routers
from go.vumitools.account import AccountStore
from go.vumitools.channel import ChannelStore
from go.vumitools.contact import ContactStore
from go.vumitools.conversation import ConversationStore
from go.vumitools.opt_out import OptOutStore
from go.vumitools.router import RouterStore
from go.vumitools.conversation.utils import ConversationWrapper
from go.vumitools.token_manager import TokenManager
from vumi.message import TransportUserMessage
class TagpoolSet(object):
"""Holder for helper methods for retrieving tag pool information.
:param dict pools:
Dictionary of `tagpool name` -> `tagpool metadata` mappings.
"""
# TODO: this should ideally be moved somewhere else,
# but it's purely cosmetic so it can live here for now
_DELIVERY_CLASS_NAMES = {
'sms': 'SMS',
'ussd': 'USSD',
'gtalk': 'Gtalk',
}
def __init__(self, pools):
self._pools = pools
def pools(self):
return self._pools.keys()
def display_name(self, pool):
return self._pools[pool].get('display_name', pool)
def country_name(self, pool, default):
return self._pools[pool].get('country_name', default)
def user_selects_tag(self, pool):
return self._pools[pool].get('user_selects_tag', False)
def delivery_class(self, pool):
return self._pools[pool].get('delivery_class', None)
def delivery_classes(self):
classes = set(self.delivery_class(pool) for pool in self.pools())
classes.discard(None)
return list(classes)
def delivery_class_name(self, delivery_class):
return self._DELIVERY_CLASS_NAMES.get(delivery_class, delivery_class)
class VumiUserApi(object):
conversation_wrapper = ConversationWrapper
def __init__(self, api, user_account_key, cleanup_api=False):
# We could get either bytes or unicode here. Decode if necessary.
if not isinstance(user_account_key, unicode):
user_account_key = user_account_key.decode('utf8')
self.api = api
self.manager = self.api.manager
self.user_account_key = user_account_key
self._cleanup_api = cleanup_api
self.conversation_store = ConversationStore(self.api.manager,
self.user_account_key)
self.contact_store = ContactStore(self.api.manager,
self.user_account_key)
self.router_store = RouterStore(self.api.manager,
self.user_account_key)
self.channel_store = ChannelStore(self.api.manager,
self.user_account_key)
self.optout_store = OptOutStore(self.api.manager,
self.user_account_key)
@Manager.calls_manager
def close(self):
if self._cleanup_api:
yield self.api.close()
def exists(self):
return self.api.user_exists(self.user_account_key)
@classmethod
def from_config_sync(cls, user_account_key, config):
return cls(
VumiApi.from_config_sync(config), user_account_key,
cleanup_api=True)
@classmethod
def from_config_async(cls, user_account_key, config):
d = VumiApi.from_config_async(config)
return d.addCallback(cls, user_account_key, cleanup_api=True)
def get_user_account(self):
return self.api.get_user_account(self.user_account_key)
def wrap_conversation(self, conversation):
"""Wrap a conversation with a ConversationWrapper.
What it says on the tin, really.
:param Conversation conversation:
Conversation object to wrap.
:rtype:
ConversationWrapper.
"""
return self.conversation_wrapper(conversation, self)
@Manager.calls_manager
def get_wrapped_conversation(self, conversation_key):
conversation = yield self.conversation_store.get_conversation_by_key(
conversation_key)
if conversation:
returnValue(self.wrap_conversation(conversation))
def get_conversation(self, conversation_key):
return self.conversation_store.get_conversation_by_key(
conversation_key)
def get_router(self, router_key):
return self.router_store.get_router_by_key(router_key)
@Manager.calls_manager
def get_channel(self, tag):
tagpool_meta = yield self.api.tpm.get_metadata(tag[0])
tag_info = yield self.api.mdb.get_tag_info(tag)
channel = yield self.channel_store.get_channel_by_tag(
tag, tagpool_meta, tag_info.current_batch.key)
returnValue(channel)
@Manager.calls_manager
def archived_conversations(self):
conv_store = self.conversation_store
keys = yield conv_store.list_conversations()
conversations = []
for bunch in conv_store.conversations.load_all_bunches(keys):
conversations.extend((yield bunch))
returnValue([c for c in conversations if c.archived()])
@Manager.calls_manager
def active_conversations(self):
keys = yield self.conversation_store.list_active_conversations()
# NOTE: This assumes that we don't have very large numbers of active
# conversations.
convs = []
for convs_bunch in self.conversation_store.load_all_bunches(keys):
convs.extend((yield convs_bunch))
returnValue(convs)
@Manager.calls_manager
def running_conversations(self):
keys = yield self.conversation_store.list_running_conversations()
# NOTE: This assumes that we don't have very large numbers of active
# conversations.
convs = []
for convs_bunch in self.conversation_store.load_all_bunches(keys):
convs.extend((yield convs_bunch))
returnValue(convs)
@Manager.calls_manager
def draft_conversations(self):
# TODO: This should probably be `stopped_conversations` instead, but we
# still apparently use `draft` in the UI in places.
conversations = yield self.active_conversations()
returnValue([c for c in conversations if c.stopped()])
@Manager.calls_manager
def active_routers(self):
keys = yield self.router_store.list_active_routers()
# NOTE: This assumes that we don't have very large numbers of active
# routers.
routers = []
for routers_bunch in self.router_store.load_all_bunches(keys):
routers.extend((yield routers_bunch))
returnValue(routers)
@Manager.calls_manager
def archived_routers(self):
conv_store = self.router_store
keys = yield conv_store.list_routers()
routers = []
for bunch in conv_store.routers.load_all_bunches(keys):
routers.extend((yield bunch))
returnValue([r for r in routers if r.archived()])
@Manager.calls_manager
def active_channels(self):
channels = []
user_account = yield self.get_user_account()
for tag in user_account.tags:
channel = yield self.get_channel(tuple(tag))
channels.append(channel)
returnValue(channels)
@Manager.calls_manager
def tagpools(self):
user_account = yield self.get_user_account()
tp_usage = defaultdict(int)
for tag in user_account.tags:
tp_usage[tag[0]] += 1
all_pools = yield self.api.tpm.list_pools()
allowed_pools = set()
for tp_bunch in user_account.tagpools.load_all_bunches():
for tp in (yield tp_bunch):
if (tp.max_keys is None
or tp.max_keys > tp_usage[tp.tagpool]):
allowed_pools.add(tp.tagpool)
available_pools = []
for pool in all_pools:
if pool not in allowed_pools:
continue
free_tags = yield self.api.tpm.free_tags(pool)
if free_tags:
available_pools.append(pool)
returnValue((yield self.api.tagpool_set(available_pools)))
@Manager.calls_manager
def applications(self):
user_account = yield self.get_user_account()
# NOTE: This assumes that we don't have very large numbers of
# applications.
app_permissions = []
for permissions in user_account.applications.load_all_bunches():
app_permissions.extend((yield permissions))
applications = [permission.application for permission
in app_permissions]
app_settings = configured_conversations()
returnValue(dict((application, app_settings[application])
for application in applications
if application in app_settings))
@Manager.calls_manager
def router_types(self):
# TODO: Permissions.
yield None
router_settings = configured_routers()
returnValue(dict((router_type, router_settings[router_type])
for router_type in router_settings))
def list_groups(self):
return self.contact_store.list_groups()
@Manager.calls_manager
def new_conversation(self, conversation_type, name, description, config,
batch_id=None, **fields):
if not batch_id:
batch_id = yield self.api.mdb.batch_start(
tags=[], user_account=self.user_account_key)
conv = yield self.conversation_store.new_conversation(
conversation_type, name, description, config, batch_id, **fields)
returnValue(conv)
@Manager.calls_manager
def new_router(self, router_type, name, description, config,
batch_id=None, **fields):
if not batch_id:
batch_id = yield self.api.mdb.batch_start(
tags=[], user_account=self.user_account_key)
router = yield self.router_store.new_router(
router_type, name, description, config, batch_id, **fields)
returnValue(router)
@Manager.calls_manager
def get_routing_table(self, user_account=None):
if user_account is None:
user_account = yield self.get_user_account()
if user_account.routing_table is None:
raise VumiError(
"Routing table missing for account: %s" % (user_account.key,))
returnValue(user_account.routing_table)
@Manager.calls_manager
def validate_routing_table(self, user_account=None):
"""Check that the routing table on this account is valid.
Currently we just check account ownership of tags and conversations.
TODO: Cycle detection, if that's even possible. Maybe other stuff.
TODO: Determine if this is necessary and move it elsewhere if it is.
"""
if user_account is None:
user_account = yield self.get_user_account()
routing_table = yield self.get_routing_table(user_account)
# We don't care about endpoints here, only connectors.
routing_connectors = set()
for src_conn, _src_ep, dst_conn, _dst_ep in routing_table.entries():
routing_connectors.add(src_conn)
routing_connectors.add(dst_conn)
# Checking tags is cheap and easy, so do that first.
channels = yield self.active_channels()
for channel in channels:
channel_conn = channel.get_connector()
if channel_conn in routing_connectors:
routing_connectors.remove(channel_conn)
# Now we run through active conversations to check those.
convs = yield self.active_conversations()
for conv in convs:
conv_conn = conv.get_connector()
if conv_conn in routing_connectors:
routing_connectors.remove(conv_conn)
# And lastly, check active routers.
routers = yield self.active_routers()
for router in routers:
router_inbound_conn = router.get_inbound_connector()
if router_inbound_conn in routing_connectors:
routing_connectors.remove(router_inbound_conn)
router_outbound_conn = router.get_outbound_connector()
if router_outbound_conn in routing_connectors:
routing_connectors.remove(router_outbound_conn)
if routing_connectors:
raise VumiError(
"Routing table contains illegal connector names: %s" % (
routing_connectors,))
@Manager.calls_manager
def _update_tag_data_for_acquire(self, user_account, tag):
# The batch we create here gets added to the tag_info and we can fish
# it out later. When we replace this with proper channel objects we can
# stash it there like we do with conversations and routers.
yield self.api.mdb.batch_start([tag], user_account=user_account.key)
user_account.tags.append(tag)
tag_info = yield self.api.mdb.get_tag_info(tag)
tag_info.metadata['user_account'] = user_account.key.decode('utf-8')
yield tag_info.save()
yield user_account.save()
@Manager.calls_manager
def acquire_tag(self, pool):
"""Acquire a tag from a given tag pool.
Tags should be held for the duration of a conversation.
:type pool: str
:param pool:
name of the pool to retrieve tags from.
:rtype:
The tag acquired or None if no tag was available.
"""
user_account = yield self.get_user_account()
if not (yield user_account.has_tagpool_permission(pool)):
log.warning("Account '%s' trying to access forbidden pool '%s'" % (
user_account.key, pool))
returnValue(None)
tag = yield self.api.tpm.acquire_tag(pool)
if tag is not None:
yield self._update_tag_data_for_acquire(user_account, tag)
returnValue(tag)
@Manager.calls_manager
def acquire_specific_tag(self, tag):
"""Acquire a specific tag.
Tags should be held for the duration of a conversation.
:type tag: tag tuple
:param tag:
The tag to acquire.
:rtype:
The tag acquired or None if the tag was not available.
"""
user_account = yield self.get_user_account()
if not (yield user_account.has_tagpool_permission(tag[0])):
log.warning("Account '%s' trying to access forbidden pool '%s'" % (
user_account.key, tag[0]))
returnValue(None)
tag = yield self.api.tpm.acquire_specific_tag(tag)
if tag is not None:
yield self._update_tag_data_for_acquire(user_account, tag)
returnValue(tag)
@Manager.calls_manager
def release_tag(self, tag):
"""Release a tag back to the pool it came from.
Tags should be released only once a conversation is finished.
:type tag: tag tuple
:param tag:
The tag to release (it is returned to the pool it came from).
:rtype:
None.
"""
user_account = yield self.get_user_account()
try:
user_account.tags.remove(list(tag))
except ValueError, e:
log.error("Tag not allocated to account: %s" % (tag,), e)
else:
tag_info = yield self.api.mdb.get_tag_info(tag)
if 'user_account' in tag_info.metadata:
del tag_info.metadata['user_account']
yield tag_info.save()
# NOTE: This loads and saves the CurrentTag object a second time.
# We should probably refactor the message store to make this
# less clumsy.
if tag_info.current_batch.key:
yield self.api.mdb.batch_done(tag_info.current_batch.key)
# Clean up routing table entries.
routing_table = yield self.get_routing_table(user_account)
routing_table.remove_transport_tag(tag)
yield user_account.save()
yield self.api.tpm.release_tag(tag)
def delivery_class_for_msg(self, msg):
# Sometimes we need a `delivery_class` but we don't always have (or
# want) one. This builds one from `msg['transport_type']`.
return {
TransportUserMessage.TT_SMS: 'sms',
TransportUserMessage.TT_USSD: 'ussd',
TransportUserMessage.TT_XMPP: 'gtalk',
TransportUserMessage.TT_TWITTER: 'twitter',
TransportUserMessage.TT_MXIT: 'mxit',
TransportUserMessage.TT_WECHAT: 'wechat',
}.get(msg['transport_type'],
msg['transport_type'])
def get_router_api(self, router_type, router_key):
return VumiRouterApi(self, router_type, router_key)
class VumiRouterApi(object):
def __init__(self, user_api, router_type, router_key):
self.user_api = user_api
self.manager = user_api.manager
self.router_type = router_type
self.router_key = router_key
def get_router(self):
return self.user_api.get_router(self.router_key)
@Manager.calls_manager
def archive_router(self, router=None):
if router is None:
router = yield self.get_router()
router.set_status_finished()
yield router.save()
yield self._remove_from_routing_table(router)
@Manager.calls_manager
def _remove_from_routing_table(self, router):
"""Remove routing entries for this router.
"""
user_account = yield self.user_api.get_user_account()
routing_table = yield self.user_api.get_routing_table(user_account)
routing_table.remove_router(router)
yield user_account.save()
@Manager.calls_manager
def start_router(self, router=None):
"""Send the start command to this router's worker.
The router is then responsible for processing this message as
appropriate and handling the state transition.
"""
if router is None:
router = yield self.get_router()
router.set_status_starting()
yield router.save()
yield self.dispatch_router_command('start')
@Manager.calls_manager
def stop_router(self, router=None):
"""Send the stop command to this router's worker.
The router is then responsible for processing this message as
appropriate and handling the state transition.
"""
if router is None:
router = yield self.get_router()
router.set_status_stopping()
yield router.save()
yield self.dispatch_router_command('stop')
def dispatch_router_command(self, command, *args, **kwargs):
"""Send a command to this router's worker.
:type command: str
:param command:
The name of the command to call.
"""
worker_name = '%s_router' % (self.router_type,)
kwargs.setdefault('user_account_key', self.user_api.user_account_key)
kwargs.setdefault('router_key', self.router_key)
return self.user_api.api.send_command(
worker_name, command, *args, **kwargs)
class VumiApi(object):
def __init__(self, manager, redis, sender=None, metric_publisher=None):
# local import to avoid circular import since
# go.api.go_api needs to access VumiApi
from go.api.go_api.session_manager import SessionManager
self.manager = manager
self.redis = redis
self.tpm = TagpoolManager(self.redis.sub_manager('tagpool_store'))
self.mdb = MessageStore(
self.manager, self.redis.sub_manager('message_store'))
self.account_store = AccountStore(self.manager)
self.token_manager = TokenManager(
self.redis.sub_manager('token_manager'))
self.session_manager = SessionManager(
self.redis.sub_manager('session_manager'))
self.mapi = sender
self.metric_publisher = metric_publisher
@Manager.calls_manager
def close(self):
"""
Clean up our Redis and Riak managers.
This method is called `close` rather than `cleanup` so we can use
`contextlib.closing()`.
"""
yield self.redis.close_manager()
yield self.manager.close_manager()
@staticmethod
def _parse_config(config):
riak_config = config.get('riak_manager', {})
redis_config = config.get('redis_manager', {})
return riak_config, redis_config
@classmethod
def from_config_sync(cls, config, amqp_client=None):
riak_config, redis_config = cls._parse_config(config)
manager = RiakManager.from_config(riak_config)
redis = RedisManager.from_config(redis_config)
sender = SyncMessageSender(amqp_client)
metric_publisher = None
if amqp_client is not None:
metric_publisher = amqp_client.get_metric_publisher()
return cls(manager, redis, sender, metric_publisher)
@classmethod
@inlineCallbacks
def from_config_async(cls, config, command_publisher=None,
metric_publisher=None):
# Note: This takes a publisher rather than a client to avoid leaking
# AMQP channels by making our own transient publishers.
riak_config, redis_config = cls._parse_config(config)
manager = TxRiakManager.from_config(riak_config)
redis = yield TxRedisManager.from_config(redis_config)
sender = AsyncMessageSender(command_publisher)
returnValue(cls(manager, redis, sender, metric_publisher))
@Manager.calls_manager
def user_exists(self, user_account_key):
"""
Check whether or not a user exists. Useful to check before creating
a VumiUserApi since that does not do any type of checking itself.
:param str user_account_key:
The user account key to check.
"""
user_data = yield self.get_user_account(user_account_key)
returnValue(user_data is not None)
def get_user_account(self, user_account_key):
return self.account_store.get_user(user_account_key)
def get_user_api(self, user_account_key, cleanup_api=False):
return VumiUserApi(self, user_account_key, cleanup_api=cleanup_api)
def send_command(self, worker_name, command, *args, **kwargs):
"""Create a VumiApiCommand and send it.
:param str worker_name: Name of worker to send command to.
:param str command: Type of command to send.
:param *args: Positional args for command.
:param **kwargs: Keyword args for command.
"""
return self.mapi.send_command(
VumiApiCommand.command(worker_name, command, *args, **kwargs))
def get_metric_manager(self, prefix):
if self.metric_publisher is None:
raise VumiError("No metric publisher available.")
return MetricManager(prefix, publisher=self.metric_publisher)
@Manager.calls_manager
def tagpool_set(self, pools):
pool_data = dict([
(pool, (yield self.tpm.get_metadata(pool)))
for pool in pools])
returnValue(TagpoolSet(pool_data))
@Manager.calls_manager
def known_tagpools(self):
pools = yield self.tpm.list_pools()
returnValue((yield self.tagpool_set(pools)))
class SyncMessageSender(object):
def __init__(self, amqp_client):
self.amqp_client = amqp_client
def send_command(self, command):
if self.amqp_client is None:
raise VumiError("No command message publisher available.")
if not self.amqp_client.is_connected():
self.amqp_client.connect()
self.amqp_client.publish_command_message(command)
class AsyncMessageSender(object):
def __init__(self, command_publisher):
self.command_publisher = command_publisher
def send_command(self, command):
if self.command_publisher is None:
raise VumiError("No command message publisher available.")
return self.command_publisher.publish_message(command)
class ApiCommandPublisher(Publisher):
"""
Publisher for VumiApiCommand messages.
"""
routing_key = "vumi.api"
durable = True
class ApiEventPublisher(Publisher):
"""
Publisher for VumiApiEvent messages.
"""
routing_key = "vumi.event"
durable = True
class VumiApiCommand(Message):
@staticmethod
def generate_id():
"""
Generate a unique command id.
There are places where we want an identifier before we can build a
complete command. This lets us do that in a consistent manner.
"""
return uuid4().get_hex()
def process_fields(self, fields):
fields.setdefault('command_id', self.generate_id())
return fields
@classmethod
def command(cls, worker_name, command_name, *args, **kwargs):
params = {
'worker_name': worker_name,
'command': command_name,
'args': list(args), # turn to list to make sure input & output
# stay the same when encoded & decoded as
# JSON.
'kwargs': kwargs,
}
if "command_id" in kwargs:
params["command_id"] = kwargs.pop("command_id")
return cls(**params)
@classmethod
def conversation_command(cls, worker_name, command_name, user_account_key,
conversation_key, *args, **kwargs):
kwargs.update({
'user_account_key': user_account_key,
'conversation_key': conversation_key,
})
return cls.command(worker_name, command_name, *args, **kwargs)
class VumiApiEvent(Message):
@classmethod
def event(cls, account_key, conversation_key, event_type, content):
return cls(account_key=account_key,
conversation_key=conversation_key,
event_type=event_type,
content=content)
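# Hedged usage sketch (synchronous flavour, not part of the module's API
# surface): the config dict is assumed to carry 'riak_manager' and
# 'redis_manager' sections, as parsed by _parse_config above.
from contextlib import closing

def example_running_conversations(config, account_key):
    # `closing` works here because VumiApi names its cleanup method `close`.
    with closing(VumiApi.from_config_sync(config)) as api:
        user_api = api.get_user_api(account_key)
        return user_api.running_conversations()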
|
|
from ast import *
builtins = [
'color',
'wait',
'move',
'rotate',
'wheels',
'random',
'get_surface_color',
'terminate',
'abs',
'follow_line_to_intersect_or_end',
'set_line_speed',
'pick_direction',
'move_straight_until_line',
'there_is_way',
'get_line_speed',
'get_intersect_or_line_end_color',
]
colors = {
'BLACK': 0,
'RED': 1,
'GREEN': 2,
'YELLOW': 3,
'BLUE': 4,
'MAGENTA': 5,
'CYAN': 6,
'WHITE': 7,
}
directions = {
'STRAIGHT': 1,
'LEFT': 2,
'RIGHT': 4,
'BACK': 8,
}
terminate = {
'OFF': 0,
'FOLLOW': 1,
'IDLE': 2,
}
VERSION = [0x01, 0x03]
KILL = [0x00, 0xAE]
class CompileException(BaseException):
def __init__(self, msg, node = None):
if node is None:
super(CompileException, self).__init__(msg)
else:
super(CompileException, self).__init__("{0}:{1}".format(node.lineno - 1, node.col_offset), msg)
class Compiler:
def __init__(self):
self.bytecode = []
self.variable_counter = 0x2a
self.variables = {}
self.functions = {}
self.compiled_functions = {}
def calc_checksum(self):
result = 0
for byte in self.bytecode:
result -= byte
if result < 0:
result += 256
self.bytecode.append(result)
def get_length_bytes(self):
div = len(self.bytecode) // 256
remainder = (len(self.bytecode)) % 256
first_byte = 3
second_byte = 219 - len(self.bytecode)
while second_byte < 0:
first_byte -= 1
second_byte += 256
if first_byte < 0:
raise CompileException('Maximum bytecode length exceeded')
# return [(219 - len(self.bytecode)) % 256, len(self.bytecode) // 256, (len(self.bytecode)) % 256]
return [first_byte, second_byte, div, remainder]
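# Worked illustration of the framing above (the 219 base and the leading 3
# are assumed device constants): a 3-byte body gives
# second_byte = 219 - 3 = 216 with no borrow, so the length bytes are
# [3, 216, 0, 3] -- the last two being the plain 16-bit body length.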
def compile(self, root):
self.compile_stmt(root)
if len(self.bytecode) == 0:
return []
if self.bytecode[-1] != 0xae:
self.bytecode.extend(KILL)
# compile functions
for index, value in enumerate(self.bytecode):
if type(value) == str:
if value in self.compiled_functions.keys():
jump_index = self.compiled_functions[value]
self.bytecode[index] = 0x90
self.bytecode[index + 1] = jump_index // 256
self.bytecode[index + 2] = jump_index % 256
else:
self.bytecode[index] = 0x90
self.bytecode[index + 1] = len(self.bytecode) // 256
self.bytecode[index + 2] = len(self.bytecode) % 256
self.compiled_functions[value] = len(self.bytecode)
for n in self.functions[value]:
self.compile_stmt(n)
self.push(0x91)
self.bytecode = [0x01] + self.get_length_bytes() + self.bytecode
self.calc_checksum()
return self.bytecode
def compile_stmt(self, node):
if type(node) == Module:
for n in node.body:
self.compile_stmt(n)
elif type(node) == Expr:
self.compile_expr(node.value)
elif type(node) == Assign:
self.assign(node.targets, node.value)
elif type(node) == If:
self.if_stmt(node)
elif type(node) == While:
self.while_loop(node)
elif type(node) == FunctionDef:
self.function_def(node)
else:
raise CompileException('Unsupported statement type %s.\n%s' % (str(type(node)), str(vars(node))), node)
def compile_expr(self, node):
if type(node) == Call:
self.call(node)
elif type(node) == Num:
self.num(node)
elif type(node) == Name:
self.get_var(node)
elif type(node) == NameConstant:
self.name_constant(node)
elif type(node) == BoolOp:
self.bool_op(node)
elif type(node) == Compare:
self.compare(node)
elif type(node) == UnaryOp:
self.unary_op(node)
elif type(node) == BinOp:
self.bin_op(node)
else:
raise CompileException('Unsupported expression type %s.\n%s' % (str(type(node)), str(vars(node))), node)
def assign(self, targets, value):
for target in targets:
if type(target) != Name:
raise CompileException('Values can only be assigned to variables', target)
if target.id in colors.keys():
raise CompileException('Variable name cannot be one of the built in colors', target)
if target.id in directions.keys():
raise CompileException('Variable name cannot be one of the built in directions', target)
if target.id in self.variables:
key = self.variables[target.id]
else:
key = self.variable_counter
self.variables[target.id] = key
self.variable_counter += 1
self.compile_expr(value)
self.bytecode.extend([key, 0x93])
def call(self, node):
if node.func.id in builtins:
getattr(self, node.func.id)(*node.args)
elif node.func.id in self.functions.keys():
self.push(node.func.id)
self.push(0x00)
self.push(0x00)
else:
raise CompileException("Unknown function call %s" % node.func.id, node)
def num(self, node):
value = node.n
if value > 127:
raise CompileException("Value %s outside of valid range" % value, node)
self.push(value)
def get_var(self, node):
if node.id in colors.keys():
self.push(colors[node.id])
elif node.id in directions.keys():
self.push(directions[node.id])
elif node.id in terminate.keys():
self.push(terminate[node.id])
else:
if node.id not in self.variables:
raise CompileException('Undefined variable %s.' % node.id, node)
key = self.variables[node.id]
self.bytecode.extend([key, 0x92])
def if_stmt(self, node):
self.compile_expr(node.test)
self.push(0x80)
self.push(0)
index = len(self.bytecode) - 1
self.push(0x97)
for n in node.body:
self.compile_stmt(n)
self.bytecode[index] = len(self.bytecode[index:]) + 1
if len(node.orelse) > 0:
self.bytecode[index] += 3
self.push(0xba)
self.push(0)
index = len(self.bytecode) - 1
self.push(0x97)
for n in node.orelse:
self.compile_stmt(n)
self.bytecode[index] = len(self.bytecode[index:]) + 1
def name_constant(self, node):
if type(node.value) != bool:
raise CompileException('Only boolean constant type is supported. %s' % type(node.value), node)
self.push(1 if node.value else 0)
def bool_op(self, node):
self.compile_expr(node.values[0])
for i in range(1, len(node.values)):
self.compile_expr(node.values[i])
if type(node.op) == And:
self.push(0xa2)
elif type(node.op) == Or:
self.push(0xa3)
else:
raise CompileException("Unknown operator %s" % type(node.op), node.op)
def compare(self, node):
self.compile_expr(node.left)
for i in range(len(node.ops)):
self.compile_expr(node.comparators[i])
self.compare_ops(node.ops[i])
def compare_ops(self, op):
if type(op) == Eq:
self.push(0xa4)
elif type(op) == NotEq:
self.push(0xa4)
self.push(0x8a)
elif type(op) == Lt:
self.push(0x9c)
self.push(0x8a)
elif type(op) == LtE:
self.push(0x9d)
self.push(0x8a)
elif type(op) == Gt:
self.push(0x9d)
elif type(op) == GtE:
self.push(0x9c)
else:
raise CompileException('Unsupported operator', op)
def unary_op(self, node):
if type(node.op) == Not:
self.compile_expr(node.operand)
self.push(0x8a)
elif type(node.op) == USub:
self.compile_expr(node.operand)
# self.bytecode[-1] -= 1
self.push(0x8b)
else:
raise CompileException('Unsupported operator', node.op)
def bin_op(self, node):
self.compile_expr(node.left)
self.compile_expr(node.right)
if type(node.op) == Add:
self.push(0x85)
elif type(node.op) == Sub:
self.push(0x86)
elif type(node.op) == Mult:
self.push(0x87)
elif type(node.op) == Div:
self.push(0x88)
elif type(node.op) == Mod:
self.push(0x89)
else:
raise CompileException('Unsupported operator', node.op)
def while_loop(self, node):
# Infinite loop
if type(node.test) == NameConstant and node.test.value:
jump_index = len(self.bytecode)
for n in node.body:
self.compile_stmt(n)
self.push(0xba)
self.push(256 - len(self.bytecode[jump_index:]) + 1)
elif type(node.test) == NameConstant and not node.test.value:
return
else:
jump_back_index = len(self.bytecode)
self.compile_expr(node.test)
self.push(0x80)
self.push(0)
jump_index = len(self.bytecode) - 1
self.push(0x97)
for n in node.body:
self.compile_stmt(n)
self.push(0xba)
self.push(256 - len(self.bytecode[jump_back_index:]) + 1)
self.bytecode[jump_index] = len(self.bytecode[jump_index:]) + 1
def function_def(self, node):
self.functions[node.name] = node.body
def move(self, distance, speed):
self.compile_expr(distance)
self.compile_expr(speed)
self.push(0x9e)
def wait(self, seconds, centisec):
self.compile_expr(seconds)
if self.bytecode[-1] == 0:
del self.bytecode[-1]
else:
self.bytecode.extend([0x64, 0x9b, 0x1, 0x86, 0x94, 0x0, 0x9d, 0x8a, 0x80, 0xf8, 0x97, 0x96])
self.compile_expr(centisec)
self.push(0x9b)
def color(self, red, green, blue):
self.compile_expr(red)
self.compile_expr(green)
self.compile_expr(blue)
self.push(0xb8)
def rotate(self, degree, speed):
self.compile_expr(degree)
self.compile_expr(speed)
self.push(0x98)
def wheels(self, left, right):
self.compile_expr(left)
self.compile_expr(right)
self.push(0x9f)
def random(self, low, high):
self.compile_expr(high)
self.compile_expr(low)
self.push(0x8c)
def get_surface_color(self):
self.push(0x0e)
self.push(0x92)
def terminate(self, value):
self.compile_expr(value)
self.push(0xae)
def abs(self, value):
self.compile_expr(value)
self.push(0xa8)
def follow_line_to_intersect_or_end(self):
self.bytecode.extend([0x01, 0xa0, 0xac, 0xad, 0x9a, 0x10, 0xa4, 0x80, 0xfd, 0x00, 0xa0, 0x01, 0x29, 0x93])
def set_line_speed(self, speed):
self.compile_expr(speed)
self.push(0x18)
self.push(0x93)
def move_straight_until_line(self, speed):
self.compile_expr(speed)
self.bytecode.extend([0x94, 0x94, 0x9f, 0xac, 0x08, 0x92, 0x80, 0xfa, 0x97, 0x96, 0x00, 0x00, 0x9f, 0xc6, 0x01, 0xa0, 0xac, 0xad, 0x9a, 0x10, 0xa4, 0x80, 0xfd, 0x97, 0x00, 0xa0, 0x01, 0x29, 0x93])
def pick_direction(self, direction):
if type(direction) != Name and direction.id not in directions.keys():
raise CompileException('Unsupported direction', direction)
self.compile_expr(direction)
self.bytecode.extend([0x94, 0x10, 0x92, 0x81, 0x8a, 0xb7, 0x29, 0x92, 0x8a, 0xb7, 0x1f, 0x93, 0x01, 0xa0, 0xad, 0x9a, 0x14, 0xa4, 0x80, 0xfd, 0x00, 0xa0, 0x00, 0x29, 0x93])
def there_is_way(self, direction):
if type(direction) != Name and direction.id not in directions.keys():
raise CompileException('Unsupported direction', direction)
self.push(0x10)
self.push(0x92)
self.compile_expr(direction)
self.push(0x81)
def get_line_speed(self):
self.push(0x18)
self.push(0x92)
def get_intersect_or_line_end_color(self):
self.push(0x0f)
self.push(0x92)
def push(self, byte):
self.bytecode.append(byte)
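# Hedged usage sketch: compiling a tiny program with the Compiler above.
# `parse` comes from the `from ast import *` at the top of the file; this
# assumes a Python version whose ast still emits Num/NameConstant nodes
# (i.e. before the Constant node replaced them in 3.8).
if __name__ == '__main__':
    demo = parse("color(RED, GREEN, BLUE)\nwait(1, 0)")
    print(Compiler().compile(demo))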
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test script for getting all the necessary functions working.
"""
from __future__ import print_function
import requests
from bs4 import BeautifulSoup
from geopy import Point
from geopy.distance import vincenty
from math import radians, sin, cos, degrees, atan2
# URLS
TRANSITVIEW = "http://www3.septa.org/hackathon/TransitView/{route}"
STOPIDS = "http://www3.septa.org/stops/bus-stop-ids.php"
STOPS = "http://www3.septa.org/hackathon/Stops/{route}"
DISTANCEMATRIX = "https://maps.googleapis.com/maps/api/distancematrix/json"
# TODO: Remove dependency on the Google Distance Matrix API
# config.json (required) must contain the API key
with open("config.json", "r") as config:
import json
KEY = json.load(config)["distance_matrix"]
class SeptaNotifier(object):
"""Class for handling septa api."""
def __init__(self, route, direction, stop_id, user_offset=None):
self._route = route
self._direction = (direction or '').lower()
self._stop_id = stop_id
# Initialize properties
self._buses = None
self._stop_ids = None
self._stops_dict = None
self._stops = None
self._previous_stop = None
self._next_bus = None
self._eta = None
self._arrival_status = None
self.__nearest_bus_dist_matrix = None
self._user_offset = user_offset or 0
self.__validate_params()
def __validate_params(self):
"""
Validate/reformat all of the inputs
:raises: ValueError if there's a problem
with any of the args
"""
try:
self._route = int(self._route)
except (ValueError, TypeError) as e:
raise ValueError(
"route={}: route is required and must be an int".format(
self._route))
if not self._direction:
raise ValueError((
"Direction (northbound/southbound/eastbound/westbound) "
"is required"))
try:
self._stop_id = int(self._stop_id)
except (ValueError, TypeError) as e:
raise ValueError(
"stop_id={}: stop_id is required and must be an int".format(
self._stop_id))
try:
self._user_offset = int(self._user_offset)
except (ValueError, TypeError) as e:
raise ValueError(
"user_offset={}: user offset must be an int".format(
self._user_offset))
@property
def buses(self):
"""The raw json directly from transitview api."""
if self._buses is None:
self._buses = self.__transitview_json(self._route)
return self._buses
@property
def stop_ids(self):
"""List of sorted stop ids."""
if self._stop_ids is None:
self._stop_ids = self.__parse_bus_stop_ids(self._route,
self._direction)
return self._stop_ids
@property
def stops_dict(self):
"""Dict mapping stop id => stop dict."""
if self._stops_dict is None:
self._stops_dict = self.__stops_dict(self._route)
return self._stops_dict
@property
def stops(self):
"""Sorted list of stops dicts."""
if self._stops is None:
self._stops = self.__stop_order(self._route, self._direction)
return self._stops
@property
def current_stop(self):
"""The current stop object from the stop_id given."""
return self.stops_dict[self._stop_id]
@property
def previous_stop(self):
"""Last stop before given one."""
if self._previous_stop is None:
current_stop = self._stop_id
stops = self.stops
for i in xrange(1, len(stops)):
if stops[i]["stopid"] == current_stop:
self._previous_stop = stops[i - 1]
break
else:
# The given stop is the first stop in the schedule
self._previous_stop = {}
return self._previous_stop
@property
def next_bus(self):
"""The next to arrive bus (actual, not septa)."""
if self._next_bus is None:
self._next_bus = self.__bus_data(self._route, self._direction)
return self._next_bus
@property
def eta(self):
if self._eta is None:
self._next_bus = self.__bus_data(self._route, self._direction)
return self._eta
@property
def arrival_status(self):
if self._arrival_status is None:
arrival_status = self.eta / 300
if arrival_status > 6:
arrival_status = 6
self._arrival_status = arrival_status
return self._arrival_status
def __transitview_json(self, route):
"""Get bus locations for a route from septa transitview."""
resp = requests.get(TRANSITVIEW.format(route=route))
if resp.status_code != 200:
raise Exception("Could not get json for route {} from '{}'.".format(route, resp.url))
return resp.json()
def __parse_bus_stop_ids(self, route, direction):
"""
Parse html at http://www3.septa.org/stops/bus-stop-ids.php
to get sorted stop_ids.
"""
# Perform request
resp = requests.post(STOPIDS,
data={"Route": route, "Direction": direction})
if resp.status_code != 200:
raise Exception("({}) Could not get html from '{}': {}".format(
resp.status_code, resp.url, resp.text))
# Parse html
html_doc = resp.text
soup = BeautifulSoup(html_doc, 'html.parser')
stop_ids = soup.find_all("td", **{"class": "bluedata"})
# First match is most likely the column header ("Stop ID")
if not stop_ids[0].getText().isdigit():
del stop_ids[0]
return map(lambda x: int(x.getText()), stop_ids)
def __stops_dict(self, route):
"""Get the stops as a dictionary."""
# Get json from septa
resp = requests.get(STOPS.format(route=route))
if resp.status_code != 200:
raise Exception("({}) Could not get json from '{}': {}".format(
resp.status_code, resp.url, resp.text))
# Convert the list of dicts to dict mapping stop_id => stop dict
return {x["stopid"]: x for x in resp.json()}
def __stop_order(self, route, direction):
"""
Get the sorted list of stop names and ids by parsing the html
returned by this url:
http://www3.septa.org/stops/bus-stop-ids.php
route:
Bus route #
direction:
Northbound/Southbound/Eastbound/Westbound
return:
Sorted list of stop dicts built from
http://www3.septa.org/hackathon/Stops/44
"""
# Get sorted list of stop ids
sorted_stop_ids = self.stop_ids
# Get the sorted list of stop dicts
stops_dict = self.stops_dict
stops = [stops_dict[x] for x in sorted_stop_ids if x in stops_dict]
return stops
def __bus_data(self, route, direction):
"""
Get how long it will take for the next bus to reach a given stop.
- From the stop id, get the lng/lat coords of that stop.
- Get the bus locations for a certain route.
- Filter by direction
- Need to know direction of the bus.
route:
Bus route number
direction:
Northbound/Southbound/Eastbound/Westbound
stop_id:
Unique ID for each bus stop
return:
The next bus's actual data, adjusted for its real-time position
"""
# Check params
if direction not in ("northbound", "southbound", "eastbound", "westbound"):
raise Exception(
"""
The direction provided ({}) is not an allowed direction
(Northbound/Southbound/Eastbound/Westbound)."""
.format(direction))
# Get buses
buses = self.buses["bus"]
if not buses:
# No buses are currently in service on this route.
raise Exception("No buses are currently in service on this route")
# Filter by direction
# After filtering, `buses` holds only vehicles heading in the requested direction.
buses = filter(lambda x: x["Direction"].lower() == direction, buses)
if not buses:
# No buses are found in this direction.
raise Exception("No buses are found in this direction.")
"""
Get the approximate time for next bus.
The next to arrive bus will be the one with shortest distance between
the provided stop and the previous stop.
"""
current_stop = self.current_stop
previous_stop = self.previous_stop
curr_lat = current_stop["lat"]
curr_lng = current_stop["lng"]
prev_lat = prev_lng = None
if previous_stop:
prev_lat = previous_stop["lat"]
prev_lng = previous_stop["lng"]
stop_coords = (curr_lat, curr_lng)
prev_stop_coords = (prev_lat, prev_lng)
def grade(bus):
"""
The grade will be the distance between the bus and the given stop
+ the distance between the bus and the previous stop. A lower
grade is better.
If previous_stop is empty, do not do math for that stop.
"""
bus_coords = (float(bus["lat"]), float(bus["lng"]))
d_curr = vincenty(stop_coords, bus_coords).meters
if previous_stop:
d_prev = vincenty(prev_stop_coords, bus_coords).meters
else:
d_prev = 0
return d_curr + d_prev
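# Hedged intuition for the grade: a bus that has just left the previous
# stop scores ~0 for d_prev plus its distance to the given stop, which
# beats a bus that has already passed the stop and is far from both points.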
# The next bus to arrive
# next_bus = min(buses, key=grade)
# return self.__nearest_real_bus(next_bus, curr_lat, curr_lng)
sorted_buses = sorted(buses, key=grade)
for bus in sorted_buses:
real_bus = self.__nearest_real_bus(bus, curr_lat, curr_lng)
if self.eta > 0:
return real_bus
return {}
def __bearing(self, start_lat, start_lng, end_lat, end_lng):
"""Get the bearing from start and end points."""
lat1 = radians(start_lat)
lat2 = radians(end_lat)
diff_long = radians(end_lng - start_lng)
x = sin(diff_long) * cos(lat2)
y = cos(lat1) * sin(lat2) - (sin(lat1) * cos(lat2) * cos(diff_long))
initial_bearing = degrees(atan2(x, y))
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
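# Worked check of the formula above: due north (same longitude, end_lat >
# start_lat) gives x = 0 and y > 0, so atan2(0, y) = 0 degrees; due east
# along the equator gives y = 0 and x > 0, i.e. 90 degrees.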
def __nearest_real_bus(self, septa_bus, end_lat, end_lng):
"""Get the real bus from a septa bus."""
# Get approx dist and time from google maps distance matrix api
offset_sec = int(septa_bus["Offset_sec"])
septa_lat = float(septa_bus["lat"])
septa_lng = float(septa_bus["lng"])
bus_coords_str = "{},{}".format(septa_lat, septa_lng)
end_coord_str = "{},{}".format(end_lat, end_lng)
params = {
"origins": bus_coords_str,
"destinations": end_coord_str,
"key": KEY
}
resp = requests.get(DISTANCEMATRIX, params=params)
if resp.status_code != 200:
raise Exception("({}) Could not get json from '{}': {}".format(
resp.status_code, resp.url, resp.text))
rows = resp.json()["rows"]
if not rows:
raise Exception("No rows found")
elems = rows[0]["elements"]
if not elems:
raise Exception("No elements found")
sorted_elems = sorted(elems, key=lambda x: x["duration"]["value"])
nearest_elem = None
for elem in sorted_elems:
duration_sec = elem["duration"]["value"]
if duration_sec > offset_sec:
# self._eta = duration_sec - offset_sec
nearest_elem = elem
break
else:
# No buses coming since they all passed the stop.
raise Exception("No buses coming since they all passed the stop.")
distance_met = nearest_elem["distance"]["value"]
duration_sec = nearest_elem["duration"]["value"]
eta = duration_sec - offset_sec - self._user_offset
self._eta = eta
# Get real lng, lat coords for bus
start = Point(septa_lat, septa_lng)
dist = vincenty(kilometers=distance_met / 1000.0)
bearing = self.__bearing(septa_lat, septa_lng, end_lat, end_lng)
dest = dist.destination(point=start, bearing=bearing)
bus_lat = dest.latitude
bus_lng = dest.longitude
return {
"lat": bus_lat,
"lng": bus_lng,
"label": septa_bus["label"],
"VehicleID": septa_bus["VehicleID"],
"BlockID": septa_bus["BlockID"],
"Direction": septa_bus["Direction"],
"destination": septa_bus["destination"],
"eta": eta
}
if __name__ == "__main__":
import json
x = SeptaNotifier(33, "NorthBound", 359, user_offset=300)
print(json.dumps({
"eta": x.eta,
"arrival_status": x.arrival_status
}, indent=4))
print(json.dumps({
"eta": x.eta,
"arrival_status": x.arrival_status,
"nearest_bus": x.next_bus
}, indent=4))
|
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language,
or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
# pylint: disable=W,C
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
    The constant 20037508.342789244 is half of the Earth's circumference in
    meters (circumference ~40075 km); the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
    The lat/lon coordinates use the WGS84 datum, right?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
    0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
    All of these tools support -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
    The same projection is defined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
    Human readable WKT format of EPSG:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2 ** zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
        for i in range(30):
            if pixelSize > self.Resolution(i):
                return i - 1 if i != 0 else 0  # We don't want to scale up
        return 29  # pixelSize is finer than the deepest zoom level considered
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2 ** zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2 ** zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
# ---------------------
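
# Illustrative round trip through the conversion chain described in the
# GlobalMercator docstring (LatLon <-> Meters <-> Pixels <-> Tile). Not part
# of the original module; the default coordinates are an arbitrary example.
def _mercator_example(lat=37.7749, lon=-122.4194, zoom=12):
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(lat, lon)    # WGS84 -> EPSG:900913 meters
    tx, ty = mercator.MetersToTile(mx, my, zoom)  # meters -> TMS tile indices
    gx, gy = mercator.GoogleTile(tx, ty, zoom)    # TMS -> Google (y flipped)
    quadkey = mercator.QuadTree(tx, ty, zoom)     # TMS -> Microsoft quadkey
    return (mx, my), (tx, ty), (gx, gy), quadkey
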
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
    The pyramid has two tiles at its top level, so it is not square but rectangular.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize=256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2 ** zoom
        px = (180 + lon) / res
        py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2 ** zoom
# return 180 / float( 1 << (8+zoom) )
@staticmethod
def TileBounds(tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2 ** zoom
return (
tx * 256 * res - 180,
ty * 256 * res - 90,
(tx + 1) * 256 * res - 180,
(ty + 1) * 256 * res - 90,
)
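
# Companion sketch for the geodetic profile: the top zoom level is two
# 256x256 tiles covering [-180,-90,180,90], so one pixel spans
# 180 / 256 / 2**zoom degrees. Not part of the original module.
def _geodetic_example(lat=37.7749, lon=-122.4194, zoom=5):
    geodetic = GlobalGeodetic()
    px, py = geodetic.LatLonToPixels(lat, lon, zoom)  # degrees -> pixels
    tx, ty = geodetic.PixelsToTile(px, py)            # pixels -> TMS tile
    bounds = GlobalGeodetic.TileBounds(tx, ty, zoom)  # tile -> degree bounds
    return (px, py), (tx, ty), bounds
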
if __name__ == "__main__":
import sys
def Usage(s=""):
print(
"Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
)
print()
if s:
print(s)
print()
print(
"This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
)
print(
"covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
)
print("and in the given pyramid 'zoomlevel'.")
print(
"For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
)
sys.exit(1)
profile = "mercator"
zoomlevel = None
lat, lon, latmax, lonmax = None, None, None, None
boundingbox = False
argv = sys.argv
i = 1
while i < len(argv):
arg = argv[i]
if arg == "-profile":
i = i + 1
profile = argv[i]
if zoomlevel is None:
zoomlevel = int(argv[i])
elif lat is None:
lat = float(argv[i])
elif lon is None:
lon = float(argv[i])
elif latmax is None:
latmax = float(argv[i])
elif lonmax is None:
lonmax = float(argv[i])
else:
Usage("ERROR: Too many parameters")
i = i + 1
if profile != "mercator":
Usage("ERROR: Sorry, given profile is not implemented yet.")
if zoomlevel is None or lat is None or lon is None:
Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")
if latmax is not None and lonmax is None:
Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")
if latmax is not None and lonmax is not None:
if latmax < lat:
Usage("ERROR: 'latmax' must be bigger then 'lat'")
if lonmax < lon:
Usage("ERROR: 'lonmax' must be bigger then 'lon'")
boundingbox = (lon, lat, lonmax, latmax)
tz = zoomlevel
mercator = GlobalMercator()
mx, my = mercator.LatLonToMeters(lat, lon)
print("Spherical Mercator (ESPG:900913) coordinates for lat/lon: ")
print(mx, my)
tminx, tminy = mercator.MetersToTile(mx, my, tz)
if boundingbox:
mx, my = mercator.LatLonToMeters(latmax, lonmax)
print("Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: ")
print(mx, my)
tmaxx, tmaxy = mercator.MetersToTile(mx, my, tz)
else:
tmaxx, tmaxy = tminx, tminy
for ty in range(tminy, tmaxy + 1):
for tx in range(tminx, tmaxx + 1):
tilefilename = "%s/%s/%s" % (tz, tx, ty)
print(tilefilename, "( TileMapService: z / x / y )")
gx, gy = mercator.GoogleTile(tx, ty, tz)
print("\tGoogle:", gx, gy)
quadkey = mercator.QuadTree(tx, ty, tz)
print("\tQuadkey:", quadkey, "(", int(quadkey, 4), ")")
bounds = mercator.TileBounds(tx, ty, tz)
print(print("\tEPSG:900913 Extent: ", bounds))
wgsbounds = mercator.TileLatLonBounds(tx, ty, tz)
print("\tWGS84 Extent:", wgsbounds)
print(
"\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif"
% (
bounds[0],
bounds[1],
bounds[2],
bounds[3],
"<your-raster-file-in-epsg900913.ext>",
tz,
tx,
ty,
)
)
print()
|
|
from __future__ import print_function
import os
import os.path as op
from subprocess import CalledProcessError
import warnings
from nose.tools import assert_raises, assert_true
import numpy as np
from numpy.testing import (assert_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw, read_raw_kit, read_raw_bti, read_info
from mne.io.constants import FIFF
from mne import (read_forward_solution, make_forward_solution,
do_forward_solution, read_trans,
convert_forward_solution, setup_volume_source_space,
read_source_spaces, make_sphere_model,
pick_types_forward, pick_info, pick_types, Transform)
from mne.utils import (requires_mne, requires_nibabel, _TempDir,
run_tests_if_main, slow_test, run_subprocess)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._compute_forward import _magnetic_dipole_field_vec
from mne.forward import Forward
from mne.source_space import (get_volume_labels_from_aseg,
_compare_source_spaces, setup_source_space)
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif')
def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
meg_rtol=1e-4, meg_atol=1e-9,
eeg_rtol=1e-3, eeg_atol=1e-3):
"""Helper to test forwards"""
# check source spaces
assert_equal(len(fwd['src']), len(fwd_py['src']))
_compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
for surf_ori in [False, True]:
if surf_ori:
# use copy here to leave our originals unmodified
fwd = convert_forward_solution(fwd, surf_ori, copy=True)
            fwd_py = convert_forward_solution(fwd_py, surf_ori, copy=True)
for key in ['nchan', 'source_nn', 'source_rr', 'source_ori',
'surf_ori', 'coord_frame', 'nsource']:
print(key)
assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7)
assert_allclose(fwd_py['mri_head_t']['trans'],
fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
assert_equal(fwd_py['sol']['data'].shape, (n_sensors, n_src))
assert_equal(len(fwd['sol']['row_names']), n_sensors)
assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
# check MEG
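    # (rows [:306] are the 306 Neuromag MEG channels of the sample dataset;
    # any rows beyond that are EEG and are compared separately below)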
assert_allclose(fwd['sol']['data'][:306],
fwd_py['sol']['data'][:306],
rtol=meg_rtol, atol=meg_atol,
err_msg='MEG mismatch')
# check EEG
if fwd['sol']['data'].shape[0] > 306:
assert_allclose(fwd['sol']['data'][306:],
fwd_py['sol']['data'][306:],
rtol=eeg_rtol, atol=eeg_atol,
err_msg='EEG mismatch')
def test_magnetic_dipole():
"""Test basic magnetic dipole forward calculation
"""
trans = Transform('mri', 'head', np.eye(4))
info = read_info(fname_raw)
picks = pick_types(info, meg=True, eeg=False, exclude=[])
info = pick_info(info, picks[:12])
coils = _create_meg_coils(info['chs'], 'normal', trans)
# magnetic dipole at device origin
r0 = np.array([0., 13., -6.])
for ch, coil in zip(info['chs'], coils):
rr = (ch['loc'][:3] + r0) / 2.
far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # mag (~1/r**3) vs grad (~1/r**4)
assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_kit():
"""Test making fwd using KIT, BTI, and CTF (compensated) files
"""
kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
trans_path = op.join(kit_dir, 'trans-sample.fif')
fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_ctf_comp_raw.fif')
# first set up a small testing source space
temp_dir = _TempDir()
fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
src = setup_source_space('sample', fname_src_small, 'oct2',
subjects_dir=subjects_dir, add_dist=False)
n_src = 108 # this is the resulting # of verts in fwd
# first use mne-C: convert file, make forward solution
fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
assert_true(isinstance(fwd, Forward))
# now let's use python with the same raw file
fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
fname_bem_meg, eeg=False, meg=True)
_compare_forwards(fwd, fwd_py, 157, n_src)
assert_true(isinstance(fwd_py, Forward))
# now let's use mne-python all the way
raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
# without ignore_ref=True, this should throw an error:
assert_raises(NotImplementedError, make_forward_solution, raw_py.info,
src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
# check that asking for eeg channels (even if they don't exist) is handled
meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
eeg=False))
fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
bem=fname_bem_meg, trans=trans_path,
ignore_ref=True)
_compare_forwards(fwd, fwd_py, 157, n_src,
meg_rtol=1e-3, meg_atol=1e-7)
# BTI python end-to-end versus C
fwd = do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs)
fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
_compare_forwards(fwd, fwd_py, 248, n_src)
# now let's test CTF w/compensation
fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True, subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
# CTF with compensation changed in python
ctf_raw = Raw(fname_ctf_raw, compensation=2)
fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
with warnings.catch_warnings(record=True):
fwd = do_forward_solution('sample', ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True,
subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
@slow_test
@testing.requires_testing_data
def test_make_forward_solution():
"""Test making M-EEG forward solution from python
"""
fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
fname_bem, mindist=5.0, eeg=True, meg=True)
assert_true(isinstance(fwd_py, Forward))
fwd = read_forward_solution(fname_meeg)
assert_true(isinstance(fwd, Forward))
_compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_sphere():
"""Test making a forward solution with a sphere model"""
temp_dir = _TempDir()
fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
src = setup_source_space('sample', fname_src_small, 'oct2',
subjects_dir=subjects_dir, add_dist=False)
out_name = op.join(temp_dir, 'tmp-fwd.fif')
run_subprocess(['mne_forward_solution', '--meg', '--eeg',
'--meas', fname_raw, '--src', fname_src_small,
'--mri', fname_trans, '--fwd', out_name])
fwd = read_forward_solution(out_name)
sphere = make_sphere_model(verbose=True)
fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=True, verbose=True)
_compare_forwards(fwd, fwd_py, 366, 108,
meg_rtol=5e-1, meg_atol=1e-6,
eeg_rtol=5e-1, eeg_atol=5e-1)
# Since the above is pretty lax, let's check a different way
for meg, eeg in zip([True, False], [False, True]):
fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
fwd_py_['sol']['data'].ravel())[0, 1],
1.0, rtol=1e-3)
@testing.requires_testing_data
@requires_mne
def test_do_forward_solution():
"""Test wrapping forward solution from python
"""
temp_dir = _TempDir()
existing_file = op.join(temp_dir, 'test.fif')
with open(existing_file, 'w') as fid:
fid.write('aoeu')
mri = read_trans(fname_trans)
fname_fake = op.join(temp_dir, 'no_have.fif')
# ## Error checks
# bad subject
assert_raises(ValueError, do_forward_solution, 1, fname_raw,
subjects_dir=subjects_dir)
# bad meas
assert_raises(ValueError, do_forward_solution, 'sample', 1,
subjects_dir=subjects_dir)
# meas doesn't exist
assert_raises(IOError, do_forward_solution, 'sample', fname_fake,
subjects_dir=subjects_dir)
# don't specify trans and meas
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
subjects_dir=subjects_dir)
# specify both trans and meas
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
trans='me', mri='you', subjects_dir=subjects_dir)
# specify non-existent trans
assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
trans=fname_fake, subjects_dir=subjects_dir)
# specify non-existent mri
assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
mri=fname_fake, subjects_dir=subjects_dir)
# specify non-string mri
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=1, subjects_dir=subjects_dir)
# specify non-string trans
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
trans=1, subjects_dir=subjects_dir)
# test specifying an actual trans in python space -- this should work but
# the transform I/O reduces our accuracy -- so we'll just hack a test here
# by making it bomb with eeg=False and meg=False
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=mri, eeg=False, meg=False, subjects_dir=subjects_dir)
# mindist as non-integer
assert_raises(TypeError, do_forward_solution, 'sample', fname_raw,
mri=fname_trans, mindist=dict(), subjects_dir=subjects_dir)
# mindist as string but not 'all'
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=fname_trans, eeg=False, mindist='yall',
subjects_dir=subjects_dir)
# src, spacing, and bem as non-str
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=fname_trans, src=1, subjects_dir=subjects_dir)
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=fname_trans, spacing=1, subjects_dir=subjects_dir)
assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
mri=fname_trans, bem=1, subjects_dir=subjects_dir)
# no overwrite flag
assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
existing_file, mri=fname_trans, subjects_dir=subjects_dir)
# let's catch an MNE error, this time about trans being wrong
assert_raises(CalledProcessError, do_forward_solution, 'sample',
fname_raw, existing_file, trans=fname_trans, overwrite=True,
spacing='oct6', subjects_dir=subjects_dir)
# No need to actually calculate and check here, since it's effectively
# done in previous tests.
@slow_test
@testing.requires_testing_data
@requires_nibabel(False)
def test_forward_mixed_source_space():
"""Test making the forward solution for a mixed source space
"""
temp_dir = _TempDir()
# get the surface source space
surf = read_source_spaces(fname_src)
# setup two volume source spaces
label_names = get_volume_labels_from_aseg(fname_aseg)
vol_labels = [label_names[int(np.random.rand() * len(label_names))]
for _ in range(2)]
vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
mri=fname_aseg,
volume_label=vol_labels[0],
add_interpolator=False)
vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
mri=fname_aseg,
volume_label=vol_labels[1],
add_interpolator=False)
# merge surfaces and volume
src = surf + vol1 + vol2
# calculate forward solution
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem, None)
assert_true(repr(fwd))
# extract source spaces
src_from_fwd = fwd['src']
# get the coordinate frame of each source space
coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
# assert that all source spaces are in head coordinates
assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
# run tests for SourceSpaces.export_volume
fname_img = op.join(temp_dir, 'temp-image.mgz')
# head coordinates and mri_resolution, but trans file
assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
mri_resolution=True, trans=None)
# head coordinates and mri_resolution, but wrong trans file
vox_mri_t = vol1[0]['vox_mri_t']
assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
mri_resolution=True, trans=vox_mri_t)
run_tests_if_main()
|
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll
from os.path import join, dirname
from sys import argv, setrecursionlimit
from pygccxml import declarations
from pyplusplus.module_builder import call_policies
from ompl.bindings_generator import code_generator_t, default_replacement
class ompl_base_generator_t(code_generator_t):
"""Class for generating the ompl.base python module."""
def __init__(self):
replacement = default_replacement
# special case for abstract base class Path with pure virtual print method:
replacement['::ompl::base::Path::print'] = ('def("__str__", bp::pure_virtual(&__str__))', """
std::string __str__(%s* obj)
{
std::ostringstream s;
obj->print(s);
return s.str();
}
""")
# A C++ call like "foo.printState(state, std::cout)" will be replaced with
# something more pythonesque: "print(foo.string(state))"
replacement['printState'] = ('def("string", &__printState)', """
std::string __printState(%s* space, ompl::base::State* state)
{
std::ostringstream s;
space->printState(state, s);
return s.str();
}
""")
# A C++ call like "foo.printProperties(std::cout)" will be replaced with
# something more pythonesque: "print(foo.properties())"
default_replacement['printProperties'] = ('def("properties", &__printProperties)', """
std::string __printProperties(%s* obj)
{
std::ostringstream s;
obj->printProperties(s);
return s.str();
}
""")
# A C++ call like "foo.printProjections(std::cout)" will be replaced with
# something more pythonesque: "print(foo.projections())"
replacement['printProjections'] = ('def("projections", &__printProjections)', """
std::string __printProjections(%s* obj)
{
std::ostringstream s;
obj->printProjections(s);
return s.str();
}
""")
# A C++ call like "foo.printProjection(projection, std::cout)" will be replaced with
# something more pythonesque: "print(foo.projection())"
replacement['printProjection'] = ('def("projection", &__printProjection)', """
std::string __printProjection(%s* obj, const ompl::base::EuclideanProjection &projection)
{
std::ostringstream s;
obj->printProjection(projection, s);
return s.str();
}
""")
# "StateSpace::Diagram(std::cout)" will be replaced with
# something more pythonesque: "print(StateSpace.Diagram())"
replacement['::ompl::base::StateSpace::Diagram'] = ('def("Diagram", &DiagramWrapper)', """
std::string DiagramWrapper(%s* obj)
{
std::ostringstream s;
obj->Diagram(s);
return s.str();
}
""")
# "StateSpace::List(std::cout)" will be replaced with
# something more pythonesque: "print(StateSpace.List())"
replacement['::ompl::base::StateSpace::List'] = ('def("List", &ListWrapper)', """
std::string ListWrapper(%s* obj)
{
std::ostringstream s;
obj->List(s);
return s.str();
}
""")
# "PlannerData::printGraphML(std::cout)" will be replaced with
# something more pythonesque: "print(PlannerData.printGraphML())"
replacement['::ompl::base::PlannerData::printGraphML'] = ('def("printGraphML", &__printGraphML)', """
std::string __printGraphML(%s* obj)
{
std::ostringstream s;
obj->printGraphML(s);
return s.str();
}
""")
# "PlannerData::printGraphviz(std::cout)" will be replaced with
# something more pythonesque: "print(PlannerData.printGraphviz())"
replacement['::ompl::base::PlannerData::printGraphviz'] = ('def("printGraphviz", &__printGraphviz)', """
std::string __printGraphviz(%s* obj)
{
std::ostringstream s;
obj->printGraphviz(s);
return s.str();
}
""")
code_generator_t.__init__(self, 'base', ['bindings/util'], replacement)
def filter_declarations(self):
# force ProblemDefinition to be included, because it is used by other modules
self.ompl_ns.class_('ProblemDefinition').include()
# force the abstract base class Path to be included, because it is used by other modules
self.ompl_ns.class_('Path').include()
code_generator_t.filter_declarations(self)
# rename STL vectors of certain types
self.std_ns.class_('map< std::string, std::shared_ptr< ompl::base::ProjectionEvaluator > >').rename('mapStringToProjectionEvaluator')
self.std_ns.class_('vector< ompl::base::State * >').rename('vectorState')
try:
self.std_ns.class_('vector< ompl::base::State const* >').rename('vectorConstState')
except: pass
self.std_ns.class_('vector< std::shared_ptr<ompl::base::StateSpace> >').rename('vectorStateSpacePtr')
#self.std_ns.class_('vector< <ompl::base::PlannerSolution> >').rename('vectorPlannerSolution')
self.std_ns.class_('map< std::string, std::shared_ptr<ompl::base::GenericParam> >').rename('mapStringToGenericParam')
self.std_ns.class_('map< std::string, ompl::base::StateSpace::SubstateLocation >').rename('mapStringToSubstateLocation')
self.std_ns.class_('vector<ompl::base::PlannerSolution>').rename('vectorPlannerSolution')
pairStateDouble = self.std_ns.class_('pair<ompl::base::State *, double>')
pairStateDouble.rename('pairStateDouble')
pairStateDouble.include()
self.ompl_ns.member_functions('maybeWrapBool').exclude()
# rename some templated types
self.ompl_ns.class_('SpecificParam< bool >').rename('SpecificParamBool')
self.ompl_ns.class_('SpecificParam< char >').rename('SpecificParamChar')
self.ompl_ns.class_('SpecificParam< int >').rename('SpecificParamInt')
self.ompl_ns.class_('SpecificParam< unsigned int >').rename('SpecificParamUint')
self.ompl_ns.class_('SpecificParam< float >').rename('SpecificParamFloat')
self.ompl_ns.class_('SpecificParam< double >').rename('SpecificParamDouble')
self.ompl_ns.class_('SpecificParam< std::basic_string<char> >').rename('SpecificParamString')
for cls in self.ompl_ns.classes(lambda decl: decl.name.startswith('SpecificParam')):
cls.constructors().exclude()
# don't export variables that need a wrapper
self.ompl_ns.variables(lambda decl: decl.is_wrapper_needed()).exclude()
# force StateSpace::allocState to be exported.
# (not sure why this is necessary)
allocStateFn = self.ompl_ns.class_('StateSpace').member_function('allocState')
allocStateFn.include()
allocStateFn.call_policies = \
call_policies.return_value_policy(call_policies.reference_existing_object)
# rename the abstract base class State to AbstractState
state = self.ompl_ns.class_('State')
state.rename('AbstractState')
# don't export components which is of type State**
state = self.ompl_ns.class_('CompoundState')
state.variable('components').exclude()
state.rename('CompoundStateInternal')
# rename a ScopedState<> to State
bstate = self.ompl_ns.class_('ScopedState< ompl::base::StateSpace >')
bstate.rename('State')
bstate.operator('=', arg_types=['::ompl::base::State const &']).exclude()
# add array access to double components of state
self.add_array_access(bstate,'double')
# loop over all predefined state spaces
for stype in ['Compound', 'RealVector', 'SO2', 'SO3', 'SE2', 'SE3', 'Discrete', 'Time', 'Dubins', 'ReedsShepp']:
# create a python type for each of their corresponding state types
state = self.ompl_ns.class_('ScopedState< ompl::base::%sStateSpace >' % stype)
state.rename(stype+'State')
state.operator('=', arg_types=['::ompl::base::State const &']).exclude()
# add a constructor that allows, e.g., an SE3State to be constructed from a State
state.add_registration_code(
'def(bp::init<ompl::base::ScopedState<ompl::base::StateSpace> const &>(( bp::arg("other") )))')
# mark the space statetype as 'internal' to emphasize that it
# shouldn't typically be used by a regular python user
if stype!='Dubins' and stype!='ReedsShepp':
self.ompl_ns.class_(stype + 'StateSpace').decls('StateType').rename(
stype + 'StateInternal')
# add a constructor that allows, e.g., a State to be constructed from a SE3State
bstate.add_registration_code(
'def(bp::init<ompl::base::ScopedState<ompl::base::%sStateSpace> const &>(( bp::arg("other") )))' % stype)
# add array access to double components of state
self.add_array_access(state,'double')
# I don't know how to export a C-style array of an enum type
for stype in ['Dubins', 'ReedsShepp']:
self.ompl_ns.enumeration(stype + 'PathSegmentType').exclude()
self.ompl_ns.class_(stype + 'Path').exclude()
self.ompl_ns.class_(stype + 'StateSpace').member_function(
stype[0].lower()+stype[1:]).exclude()
# don't expose these utility functions that return double*
self.ompl_ns.member_functions('getValueAddressAtIndex').exclude()
self.ompl_ns.member_functions('getValueAddressAtName').exclude()
self.ompl_ns.member_functions('getValueAddressAtLocation').exclude()
# don't export vector<ValueLocation>
self.ompl_ns.member_functions('getValueLocations').exclude()
# don't export map<std::string, ValueLocation>
self.ompl_ns.member_functions('getValueLocationsByName').exclude()
# don't expose double*
self.ompl_ns.class_('RealVectorStateSpace').class_(
'StateType').variable('values').exclude()
try:
stateStorage = self.ompl_ns.class_('StateStorage')
stateStorage.member_function('getStateSamplerAllocatorRange').exclude()
stateStorage.add_registration_code('def("getStateSamplerAllocatorRange", &ompl::base::StateStorage::getStateSamplerAllocatorRange)')
except:
pass
cls = self.ompl_ns.class_('PlannerStatus')
# rename to something more memorable than the default Py++ name for
# the casting operator:
# as__scope_ompl_scope_base_scope_PlannerStatus_scope_StatusType
cls.operator(lambda decl: decl.name=='operator ::ompl::base::PlannerStatus::StatusType').rename('getStatus')
# for python 2.x
cls.add_registration_code(
'def("__nonzero__", &ompl::base::PlannerStatus::operator bool)')
# for python 3.x
cls.add_registration_code(
'def("__bool__", &ompl::base::PlannerStatus::operator bool)')
# Using nullptr as a default value in method arguments causes
# problems with Boost.Python.
# See https://github.com/boostorg/python/issues/60
self.ompl_ns.class_('ProblemDefinition').add_declaration_code('#define nullptr NULL\n')
# Exclude PlannerData::getEdges function that returns a map of PlannerDataEdge* for now
#self.ompl_ns.class_('PlannerData').member_functions('getEdges').exclude()
#self.std_ns.class_('map< unsigned int, ompl::base::PlannerDataEdge const*>').include()
mapUintToPlannerDataEdge_cls = self.std_ns.class_('map< unsigned int, const ompl::base::PlannerDataEdge *>')
mapUintToPlannerDataEdge_cls.rename('mapUintToPlannerDataEdge')
mapUintToPlannerDataEdge_cls.indexing_suite.call_policies = \
call_policies.return_value_policy(call_policies.reference_existing_object)
# Remove Boost.Graph representation from PlannerData
self.ompl_ns.class_('PlannerData').member_functions('toBoostGraph').exclude()
# Make PlannerData printable
self.replace_member_function(self.ompl_ns.class_('PlannerData').member_function('printGraphviz'))
self.replace_member_function(self.ompl_ns.class_('PlannerData').member_function('printGraphML'))
# serialize passes archive by reference which causes problems
self.ompl_ns.class_('PlannerDataVertex').member_functions('serialize').exclude()
self.ompl_ns.class_('PlannerDataEdge').member_functions('serialize').exclude()
# add array indexing to the RealVectorState
self.add_array_access(self.ompl_ns.class_('RealVectorStateSpace').class_('StateType'))
# typedef's are not handled by Py++, so we need to explicitly rename uBLAS vector to EuclideanProjection
cls = self.mb.namespace('ublas').class_(
'vector<double, boost::numeric::ublas::unbounded_array<double, std::allocator<double> > >')
cls.include()
cls.rename('EuclideanProjection')
cls.member_functions().exclude()
cls.operators().exclude()
self.add_array_access(cls,'double')
# make objects printable that have a print function
self.replace_member_functions(self.ompl_ns.member_functions('print'))
# handle special case (abstract base class with pure virtual method)
self.ompl_ns.class_('Path').add_wrapper_code(
'virtual void print(std::ostream&) const {}')
# make settings printable
self.replace_member_functions(self.ompl_ns.member_functions('printSettings'))
# make properties printable
self.replace_member_functions(self.ompl_ns.member_functions('printProperties'))
# make states printable
self.replace_member_functions(self.ompl_ns.member_functions('printState'))
# make list of available projections printable
self.replace_member_functions(self.ompl_ns.member_functions('printProjections'))
# make projections printable
self.replace_member_functions(self.ompl_ns.member_functions('printProjection'))
# make state space diagram printable
self.replace_member_function(self.ompl_ns.class_('StateSpace').member_function('Diagram'))
# make state space list printable
self.replace_member_function(self.ompl_ns.class_('StateSpace').member_function('List'))
# add wrappers for std::function types
self.add_function_wrapper('bool(const ompl::base::GoalLazySamples*, ompl::base::State*)',
'GoalSamplingFn', 'Goal sampling function')
self.add_function_wrapper('void(const ompl::base::State*)',
'NewStateCallbackFn', 'New state callback function')
self.add_function_wrapper('ompl::base::PlannerPtr(const ompl::base::SpaceInformationPtr&)',
'PlannerAllocator', 'Planner allocator')
self.add_function_wrapper('bool()',
'PlannerTerminationConditionFn','Planner termination condition function')
self.add_function_wrapper('bool(const ompl::base::State*)',
'StateValidityCheckerFn', 'State validity checker function')
self.add_function_wrapper('ompl::base::StateSamplerPtr(const ompl::base::StateSpace*)',
'StateSamplerAllocator', 'State sampler allocator')
self.add_function_wrapper('ompl::base::ValidStateSamplerPtr(ompl::base::SpaceInformation const*)',
'ValidStateSamplerAllocator', 'Valid state allocator function')
self.add_function_wrapper('double(const ompl::base::PlannerDataVertex&, const ompl::base::PlannerDataVertex&, const ompl::base::PlannerDataEdge&)',
'EdgeWeightFn', 'Edge weight function')
self.add_function_wrapper('ompl::base::Cost(const ompl::base::State*, const ompl::base::Goal*)',
'CostToGoHeuristic', 'Cost-to-go heuristic for optimizing planners')
self.add_function_wrapper('std::string()', 'PlannerProgressProperty',
'Function that returns stringified value of a property while a planner is running')
# rename SamplerSelectors
self.ompl_ns.class_('SamplerSelector< ompl::base::StateSampler >').rename('StateSamplerSelector')
self.ompl_ns.class_('SamplerSelector< ompl::base::ValidStateSampler >').rename('ValidStateSamplerSelector')
try:
cls = self.ompl_ns.class_('StateStorage').member_functions('load')
self.ompl_ns.class_('StateStorage').member_function('load', arg_types=['::std::istream &']).exclude()
self.ompl_ns.class_('StateStorage').member_function('store', arg_types=['::std::ostream &']).exclude()
except:
pass
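
# For orientation, a sketch of what the replacements above buy on the Python
# side, assuming the generated bindings import as ompl.base (an assumption;
# this snippet is illustrative and is not executed by the generator):
#
#     from ompl import base as ob
#     space = ob.RealVectorStateSpace(2)
#     state = ob.State(space)        # ScopedState<StateSpace>, renamed above
#     print(space.string(state))     # replaces C++ printState(state, std::cout)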
class ompl_control_generator_t(code_generator_t):
def __init__(self):
replacement = default_replacement
# A C++ call like "foo.printControl(control, std::cout)" will be replaced with
# something more pythonesque: "print(foo.string(control))"
replacement['printControl'] = ('def("string", &__printControl)', """
std::string __printControl(%s* space, ompl::control::Control* control)
{
std::ostringstream s;
space->printControl(control, s);
return s.str();
}
""")
replacement['printAsMatrix'] = ('def("printAsMatrix", &__printAsMatrix)', """
std::string __printAsMatrix(%s* path)
{
std::ostringstream s;
path->printAsMatrix(s);
return s.str();
}
""")
replacement['::ompl::control::ODESolver::getStatePropagator'] = ("""
def("getStatePropagator", &getStatePropagator1);
ODESolver_exposer.def("getStatePropagator", &getStatePropagator2);
ODESolver_exposer.staticmethod( "getStatePropagator" )""", """
// %s
ompl::control::StatePropagatorPtr getStatePropagator2(ompl::control::ODESolverPtr solver,
const ompl::control::ODESolver::PostPropagationEvent &postEvent)
{
return ompl::control::ODESolver::getStatePropagator(solver, postEvent);
}
ompl::control::StatePropagatorPtr getStatePropagator1(ompl::control::ODESolverPtr solver)
{
return ompl::control::ODESolver::getStatePropagator(solver);
}
""")
code_generator_t.__init__(self, 'control', ['bindings/util', 'bindings/base', 'bindings/geometric'], replacement)
def filter_declarations(self):
code_generator_t.filter_declarations(self)
# rename STL vectors of certain types
self.std_ns.class_('vector< ompl::control::Control * >').rename('vectorControlPtr')
# don't export variables that need a wrapper
self.ompl_ns.variables(lambda decl: decl.is_wrapper_needed()).exclude()
# force ControlSpace::allocControl to be exported.
# (not sure why this is necessary)
allocControlFn = self.ompl_ns.class_('ControlSpace').member_function('allocControl')
allocControlFn.include()
allocControlFn.call_policies = \
call_policies.return_value_policy(call_policies.reference_existing_object)
# don't export components which is of type Control**
self.ompl_ns.class_('CompoundControl').variable('components').exclude()
# don't export some internal data structure
self.ompl_ns.class_('OrderCellsByImportance').exclude()
# don't expose this utility function
self.ompl_ns.member_functions('getValueAddressAtIndex').exclude()
self.ompl_ns.class_('KPIECE1').member_functions('freeGridMotions').exclude()
# add array indexing to the RealVectorState
self.add_array_access(self.ompl_ns.class_('RealVectorControlSpace').class_('ControlType'))
# make objects printable that have a print function
self.replace_member_functions(self.ompl_ns.member_functions('print'))
# make settings printable
self.replace_member_functions(self.ompl_ns.member_functions('printSettings'))
# make controls printable
self.replace_member_functions(self.ompl_ns.member_functions('printControl'))
# print paths as matrices
self.replace_member_functions(self.ompl_ns.member_functions('printAsMatrix'))
# export ODESolver-derived classes that use Boost.OdeInt
for odesolver in ['ODEBasicSolver', 'ODEErrorSolver', 'ODEAdaptiveSolver']:
self.ompl_ns.class_(lambda cls: cls.name.startswith(odesolver)).rename(odesolver)
self.add_function_wrapper('void(const ompl::control::ODESolver::StateType &, const ompl::control::Control*, ompl::control::ODESolver::StateType &)',
'ODE','Ordinary differential equation')
# workaround for default argument for PostPropagationEvent
self.replace_member_function(self.ompl_ns.class_('ODESolver').member_function(
'getStatePropagator'))
# LLVM's clang++ compiler doesn't like exporting this method because
# the argument type (Grid::Cell) is protected
self.ompl_ns.member_functions('computeImportance').exclude()
# export pure virtual member functions, otherwise code doesn't compile
syclop = self.ompl_ns.class_('Syclop')
syclop.add_wrapper_code("""
virtual ompl::control::Syclop::Motion* addRoot(const ompl::base::State* s)
{
bp::override func_addRoot = this->get_override("addRoot");
return func_addRoot(s);
}
virtual void selectAndExtend(ompl::control::Syclop::Region& region, std::vector<ompl::control::Syclop::Motion*>& newMotions)
{
bp::override func_selectAndExtend = this->get_override("selectAndExtend");
func_selectAndExtend(region, newMotions);
}""")
# omit ompl::control::Syclop::Defaults nested subclass, otherwise
# code doesn't compile (don't know why)
syclop.class_('Defaults').exclude()
# add wrappers for std::function types
self.add_function_wrapper('ompl::control::ControlSamplerPtr(const ompl::control::ControlSpace*)',
'ControlSamplerAllocator', 'Control sampler allocator')
self.add_function_wrapper('ompl::control::DirectedControlSamplerPtr(const ompl::control::SpaceInformation*)',
'DirectedControlSamplerAllocator','Directed control sampler allocator')
# same type as StatePropagatorFn, so no need to export this. Instead, we just define a type alias in the python module.
#self.add_function_wrapper('void(const ompl::base::State*, const ompl::control::Control*, const double, ompl::base::State*)',
# 'PostPropagationEvent','Post-propagation event')
self.add_function_wrapper('void(const ompl::base::State*, const ompl::control::Control*, const double, ompl::base::State*)',
'StatePropagatorFn','State propagator function')
self.add_function_wrapper('double(int, int)','EdgeCostFactorFn',
'Syclop edge cost factor function')
self.add_function_wrapper('void(int, int, std::vector<int>&)','LeadComputeFn',
'Syclop lead compute function')
# code generation fails to compile, most likely because of a bug in
# Py++'s generation of exposed_decl.pypp.txt.
self.ompl_ns.member_functions('getPlannerAllocator').exclude()
self.ompl_ns.member_functions('setPlannerAllocator').exclude()
self.ompl_ns.namespace('control').class_('SimpleSetup').add_registration_code(
'def("setPlannerAllocator", &ompl::control::SimpleSetup::setPlannerAllocator)')
self.ompl_ns.namespace('control').class_('SimpleSetup').add_registration_code(
'def("getPlannerAllocator", &ompl::control::SimpleSetup::getPlannerAllocator, bp::return_value_policy< bp::copy_const_reference >())')
# Do this for all classes that exist with the same name in another namespace
# (We also do it for all planners; see below)
for cls in ['SimpleSetup', 'SpaceInformation']:
self.ompl_ns.namespace('control').class_(cls).wrapper_alias = 'Control%s_wrapper' % cls
# Py++ seems to get confused by some methods declared in one module
# that are *not* overridden in a derived class in another module. The
# Planner class is defined in ompl::base and two of its virtual methods,
        # setProblemDefinition and checkValidity, are not overridden by most
# planners. The code below forces Py++ to do the right thing (or at
# least make it work). It seems rather hacky and there may be a better
# solution.
# do this for all planners
for planner in ['KPIECE1', 'PDST', 'RRT', 'EST', 'Syclop', 'SyclopEST', 'SyclopRRT','SST']:
# many planners exist with the same name in another namespace
self.ompl_ns.namespace('control').class_(planner).wrapper_alias = 'Control%s_wrapper' % planner
self.ompl_ns.class_(planner).add_registration_code("""
def("solve", (::ompl::base::PlannerStatus(::ompl::base::Planner::*)( double ))(&::ompl::base::Planner::solve), (bp::arg("solveTime")) )""")
self.ompl_ns.class_(planner).add_registration_code("""
def("setProblemDefinition",&::ompl::base::Planner::setProblemDefinition,
&Control%s_wrapper::default_setProblemDefinition, (bp::arg("pdef")) )""" % planner)
self.ompl_ns.class_(planner).add_registration_code("""
def("checkValidity",&::ompl::base::Planner::checkValidity,
&Control%s_wrapper::default_checkValidity )""" % planner)
class ompl_geometric_generator_t(code_generator_t):
def __init__(self):
replacement = default_replacement
replacement['printAsMatrix'] = ('def("printAsMatrix", &__printAsMatrix)', """
std::string __printAsMatrix(%s* path)
{
std::ostringstream s;
path->printAsMatrix(s);
return s.str();
}
""")
replacement['printDebug'] = ('def("printDebug", &__printDebug)', """
std::string __printDebug(%s* obj)
{
std::ostringstream s;
obj->printDebug(s);
return s.str();
}
""")
code_generator_t.__init__(self, 'geometric', ['bindings/util', 'bindings/base'], replacement)
def filter_declarations(self):
code_generator_t.filter_declarations(self)
#self.ompl_ns.namespace('util').exclude()
# don't export variables that need a wrapper
self.ompl_ns.variables(lambda decl: decl.is_wrapper_needed()).exclude()
# make objects printable that have a print function
self.replace_member_functions(self.ompl_ns.member_functions('print'))
# print paths as matrices
self.replace_member_functions(self.ompl_ns.member_functions('printAsMatrix'))
# print debug info
self.replace_member_functions(self.ompl_ns.member_functions('printDebug'))
self.ompl_ns.member_functions('freeGridMotions').exclude()
self.ompl_ns.class_('PRM').member_functions('maybeConstructSolution').exclude()
self.ompl_ns.class_('PRM').member_functions('growRoadmap',
function=declarations.access_type_matcher_t('protected')).exclude()
self.ompl_ns.class_('PRM').member_functions('expandRoadmap',
function=declarations.access_type_matcher_t('protected')).exclude()
# don't export some internal data structure
self.ompl_ns.classes('OrderCellsByImportance').exclude()
# LLVM's clang++ compiler doesn't like exporting this method because
# the argument type (Grid::Cell) is protected
self.ompl_ns.member_functions('computeImportance').exclude()
# add wrappers for std::function types
self.add_function_wrapper('unsigned int()',
'NumNeighborsFn', 'Number of neighbors function')
# self.add_function_wrapper('std::vector<ompl::geometric::PRM::Vertex>&(const ompl::geometric::PRM::Vertex)',
# 'ConnectionStrategy', 'Connection strategy')
self.add_function_wrapper('bool(const ompl::geometric::PRM::Vertex&, const ompl::geometric::PRM::Vertex&)',
'ConnectionFilter', 'Connection filter')
# code generation fails to compile, most likely because of a bug in
# Py++'s generation of exposed_decl.pypp.txt.
self.ompl_ns.member_functions('getPlannerAllocator').exclude()
self.ompl_ns.member_functions('setPlannerAllocator').exclude()
self.ompl_ns.namespace('geometric').class_('SimpleSetup').add_registration_code(
'def("setPlannerAllocator", &ompl::geometric::SimpleSetup::setPlannerAllocator)')
self.ompl_ns.namespace('geometric').class_('SimpleSetup').add_registration_code(
'def("getPlannerAllocator", &ompl::geometric::SimpleSetup::getPlannerAllocator, bp::return_value_policy< bp::copy_const_reference >())')
self.std_ns.class_('vector< std::shared_ptr<ompl::geometric::BITstar::Vertex> >').exclude()
self.std_ns.class_('vector<const ompl::base::State *>').exclude()
# Py++ seems to get confused by some methods declared in one module
# that are *not* overridden in a derived class in another module. The
# Planner class is defined in ompl::base and two of its virtual methods,
        # setProblemDefinition and checkValidity, are not overridden by most
# planners. The code below forces Py++ to do the right thing (or at
# least make it work). It seems rather hacky and there may be a better
# solution.
# do this for all planners
for planner in ['EST', 'BiEST', 'ProjEST', 'KPIECE1', 'BKPIECE1', 'LBKPIECE1', 'PRM', 'LazyPRM', 'LazyPRMstar', 'PDST', 'LazyRRT', 'RRT', 'RRTConnect', 'TRRT', 'RRTstar', 'RRTXstatic', 'RRTsharp','LBTRRT', 'SBL', 'SPARS', 'SPARStwo', 'STRIDE', 'FMT', 'BFMT', 'InformedRRTstar', 'SORRTstar', 'BITstar', 'SST']:
try:
cls = self.ompl_ns.class_(planner)
except:
continue
self.ompl_ns.class_(planner).add_registration_code("""
def("solve", (::ompl::base::PlannerStatus(::ompl::base::Planner::*)( double ))(&::ompl::base::Planner::solve), (bp::arg("solveTime")) )""")
if planner!='PRM':
# PRM overrides setProblemDefinition, so we don't need to add this code
self.ompl_ns.class_(planner).add_registration_code("""
def("setProblemDefinition",&::ompl::base::Planner::setProblemDefinition,
&%s_wrapper::default_setProblemDefinition, (bp::arg("pdef")) )""" % planner)
self.ompl_ns.class_(planner).add_registration_code("""
def("checkValidity",&::ompl::base::Planner::checkValidity,
&%s_wrapper::default_checkValidity )""" % planner)
# The OMPL implementation of PRM uses two threads: one for constructing
# the roadmap and another for checking for a solution. This causes
# problems when both threads try to access the python interpreter
# simultaneously. This is a known limitation of Boost.Python. We
# therefore use a single-threaded version of PRM in python.
PRM_cls = self.ompl_ns.class_('PRM')
PRM_cls.member_function('solve').exclude()
PRM_cls.add_wrapper_code("""
virtual ::ompl::base::PlannerStatus solve( ::ompl::base::PlannerTerminationCondition const & ptc ) {
if( bp::override func_solve = this->get_override( "solve" ) )
return func_solve( boost::ref(ptc) );
else{
return default_solve( boost::ref(ptc) );
}
}
::ompl::base::PlannerStatus default_solve( ::ompl::base::PlannerTerminationCondition const & ptc );
""")
PRM_cls.add_declaration_code(open(join(dirname(__file__),
'PRM.SingleThreadSolve.cpp'),'r').read())
        # This needs to be the last registration code added to PRM_cls, due to the ugly hack below.
PRM_cls.add_registration_code("""def("solve",
(::ompl::base::PlannerStatus(::ompl::geometric::PRM::*)( ::ompl::base::PlannerTerminationCondition const &))(&PRM_wrapper::solve),
(::ompl::base::PlannerStatus(PRM_wrapper::*)( ::ompl::base::PlannerTerminationCondition const & ))(&PRM_wrapper::default_solve), bp::arg("ptc") );
// HACK ALERT: closing brace destroys bp::scope, so that PRMstar is not a nested class of PRM
}
{
// wrapper for PRMstar, derived from single-threaded PRM_wrapper
bp::class_<PRMstar_wrapper, bp::bases< PRM_wrapper >, boost::noncopyable >("PRMstar", bp::init< ompl::base::SpaceInformationPtr const & >( bp::arg("si") ) )
""")
# Add wrapper code for PRM*
PRM_cls.add_declaration_code("""
class PRMstar_wrapper : public PRM_wrapper
{
public:
PRMstar_wrapper(const ompl::base::SpaceInformationPtr &si) : PRM_wrapper(si, true)
{
setName("PRMstar");
params_.remove("max_nearest_neighbors");
}
};
""")
# LazyPRM's Vertex type is void* so exclude addMilestone which has return type void*
self.ompl_ns.class_('LazyPRM').member_function('addMilestone').exclude()
# avoid difficulties in exporting the return type std::vector<base::PlannerDataPtr>
# do this for all multithreaded planners
for planner in ['SPARS', 'SPARStwo']:
cls = self.ompl_ns.class_(planner)
cls.constructor(arg_types=["::ompl::base::SpaceInformationPtr const &"]).exclude()
cls.add_registration_code('def(bp::init<ompl::base::SpaceInformationPtr const &>(bp::arg("si")))')
cls.add_wrapper_code("""
{0}_wrapper(::ompl::base::SpaceInformationPtr const &si) : ompl::geometric::{0}(si),
bp::wrapper<ompl::geometric::{0}>()
{{
OMPL_WARN("%s: this planner uses multiple threads and might crash if your StateValidityChecker, OptimizationObjective, etc., are allocated within Python.", getName().c_str());
}}
""".format(planner))
# exclude methods that use problematic types
cls = self.ompl_ns.class_('SPARS')
cls.member_function('addPathToSpanner').exclude()
cls.member_function('computeDensePath').exclude()
self.ompl_ns.class_('SPARStwo').member_function('findCloseRepresentatives').exclude()
# needed to be able to set the connection strategy for PRM
# the PRM::Vertex type is typedef-ed to boost::graph_traits<Graph>::vertex_descriptor. This can
# be equal to an unsigned long or unsigned int, depending on architecture (or version of boost?)
try:
self.ompl_ns.class_('NearestNeighbors<unsigned long>').include()
self.ompl_ns.class_('NearestNeighbors<unsigned long>').rename('NearestNeighbors')
self.ompl_ns.class_('NearestNeighborsLinear<unsigned long>').rename('NearestNeighborsLinear')
self.ompl_ns.class_('KStrategy<unsigned long>').rename('KStrategy')
self.ompl_ns.class_('KStarStrategy<unsigned long>').rename('KStarStrategy')
except:
self.ompl_ns.class_('NearestNeighbors<unsigned int>').include()
self.ompl_ns.class_('NearestNeighbors<unsigned int>').rename('NearestNeighbors')
self.ompl_ns.class_('NearestNeighborsLinear<unsigned int>').rename('NearestNeighborsLinear')
self.ompl_ns.class_('KStrategy<unsigned int>').rename('KStrategy')
self.ompl_ns.class_('KStarStrategy<unsigned int>').rename('KStarStrategy')
try:
# Exclude some functions from BIT* that cause some Py++ compilation problems:
self.ompl_ns.class_('BITstar').member_functions('getEdgeQueue').exclude() #I don't know why this doesn't work.
self.ompl_ns.class_('BITstar').member_functions('getVertexQueue').exclude() #I don't know why this doesn't work.
except:
pass
class ompl_tools_generator_t(code_generator_t):
def __init__(self):
replacement = default_replacement
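# Benchmark::benchmark() normally runs each planner run in a separate thread;
# the wrapper below copies the request and forces useThreads to false,
# because the Python interpreter cannot safely be entered from multiple
# C++ threads (the same Boost.Python limitation noted for PRM above).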
replacement['::ompl::tools::Benchmark::benchmark'] = ('def("benchmark", &benchmarkWrapper)', """
void benchmarkWrapper(%s* obj, const ompl::tools::Benchmark::Request& request)
{
ompl::tools::Benchmark::Request req(request);
req.useThreads = false;
obj->benchmark(req);
}
""")
replacement['printResultsInfo'] = ('def("printResultsInfo", &__printResultsInfo)', """
std::string __printResultsInfo(%s* obj)
{
std::ostringstream s;
obj->printResultsInfo(s);
return s.str();
}
""")
replacement['printLogs'] = ('def("printLogs", &__printLogs)', """
std::string __printLogs(%s* obj)
{
std::ostringstream s;
obj->printLogs(s);
return s.str();
}
""")
replacement['saveDataLog'] = ('def("saveDataLog", &__saveDataLog)', """
std::string __saveDataLog(%s* obj)
{
std::ostringstream s;
obj->saveDataLog(s);
return s.str();
}
""")
code_generator_t.__init__(self, 'tools',
['bindings/util', 'bindings/base', 'bindings/geometric', 'bindings/control'], replacement, 1)
def filter_declarations(self):
code_generator_t.filter_declarations(self)
# rename STL vectors/maps of certain types
self.std_ns.class_('vector< ompl::tools::Benchmark::PlannerExperiment >').rename('vectorPlannerExperiment')
self.std_ns.class_('vector< std::vector< std::map<std::string, std::string> > >').rename('vectorRunProgressData')
# make objects printable that have a print function
self.replace_member_functions(self.ompl_ns.member_functions('print'))
benchmark_cls = self.ompl_ns.class_('Benchmark')
self.replace_member_function(benchmark_cls.member_function('benchmark'))
# next five statements take care of weird error in default value for name argument in constructor
benchmark_cls.constructors().exclude()
benchmark_cls.add_registration_code(
'def(bp::init< ompl::geometric::SimpleSetup &, bp::optional< std::string const & > >(( bp::arg("setup"), bp::arg("name")=std::basic_string<char, std::char_traits<char>, std::allocator<char> >() )) )')
benchmark_cls.add_wrapper_code(
"""Benchmark_wrapper(::ompl::geometric::SimpleSetup & setup, const ::std::string & name=std::string() )
: ompl::tools::Benchmark( boost::ref(setup), name )
, bp::wrapper< ompl::tools::Benchmark >(){}""")
benchmark_cls.add_registration_code(
'def(bp::init< ompl::control::SimpleSetup &, bp::optional< std::string const & > >(( bp::arg("setup"), bp::arg("name")=std::basic_string<char, std::char_traits<char>, std::allocator<char> >() )) )')
benchmark_cls.add_wrapper_code(
"""Benchmark_wrapper(::ompl::control::SimpleSetup & setup, const ::std::string & name=std::string() )
: ompl::tools::Benchmark( boost::ref(setup), name )
, bp::wrapper< ompl::tools::Benchmark >(){}""")
# don't want to export iostream
benchmark_cls.member_function('saveResultsToStream').exclude()
self.ompl_ns.member_functions('addPlannerAllocator').exclude()
benchmark_cls.member_functions(lambda method: method.name.startswith('set') and method.name.endswith('Event')).exclude()
benchmark_cls.add_registration_code(
'def("addPlannerAllocator", &ompl::tools::Benchmark::addPlannerAllocator)')
self.ompl_ns.class_('OptimizePlan').add_registration_code(
'def("addPlannerAllocator", &ompl::tools::OptimizePlan::addPlannerAllocator)')
benchmark_cls.add_registration_code(
'def("setPlannerSwitchEvent", &ompl::tools::Benchmark::setPlannerSwitchEvent)')
benchmark_cls.add_registration_code(
'def("setPreRunEvent", &ompl::tools::Benchmark::setPreRunEvent)')
benchmark_cls.add_registration_code(
'def("setPostRunEvent", &ompl::tools::Benchmark::setPostRunEvent)')
self.add_function_wrapper('void(const ompl::base::PlannerPtr&)',
'PreSetupEvent', 'Pre-setup event')
self.add_function_wrapper('void(const ompl::base::PlannerPtr&, ompl::tools::Benchmark::RunProperties&)',
'PostSetupEvent', 'Post-setup event')
benchmark_cls.class_('Request').no_init = False
class ompl_util_generator_t(code_generator_t):
def __init__(self):
replacement = default_replacement
code_generator_t.__init__(self, 'util', None, replacement, 1)
def filter_declarations(self):
code_generator_t.filter_declarations(self)
# rename STL vectors of certain types
self.std_ns.class_('vector< unsigned long >').include()
self.std_ns.class_('vector< unsigned long >').rename('vectorSizeT')
# not needed; causes problems when compiling in C++11 mode
#self.std_ns.class_('vector< bool >').include()
#self.std_ns.class_('vector< bool >').rename('vectorBool')
self.std_ns.class_('vector< int >').include()
self.std_ns.class_('vector< int >').rename('vectorInt')
self.std_ns.class_('vector< double >').include()
self.std_ns.class_('vector< double >').rename('vectorDouble')
self.std_ns.class_('vector< unsigned int >').include()
self.std_ns.class_('vector< unsigned int >').rename('vectorUint')
self.std_ns.class_('vector< std::string >').include()
self.std_ns.class_('vector< std::string >').rename('vectorString')
self.std_ns.class_('vector< std::vector<int> >').include()
self.std_ns.class_('vector< std::vector<int> >').rename('vectorVectorInt')
self.std_ns.class_('vector< std::vector<unsigned int> >').include()
self.std_ns.class_('vector< std::vector<unsigned int> >').rename('vectorVectorUint')
self.std_ns.class_('vector< std::vector<double> >').include()
self.std_ns.class_('vector< std::vector<double> >').rename('vectorVectorDouble')
self.std_ns.class_('vector< std::map<std::string, std::string > >').include()
self.std_ns.class_('vector< std::map<std::string, std::string > >').rename('vectorMapStringToString')
self.std_ns.class_('map<std::string, std::string >').include()
self.std_ns.class_('map<std::string, std::string >').rename('mapStringToString')
self.std_ns.class_('vector< ompl::PPM::Color >').rename('vectorPPMColor')
try:
# Exclude the ProlateHyperspheroid Class which needs Eigen, and the associated member functions in the RNG
self.ompl_ns.class_('ProlateHyperspheroid').exclude()
self.ompl_ns.class_('RNG').member_functions('uniformProlateHyperspheroidSurface').exclude()
self.ompl_ns.class_('RNG').member_functions('uniformProlateHyperspheroid').exclude()
except:
pass
class ompl_morse_generator_t(code_generator_t):
def __init__(self):
replacement = default_replacement
code_generator_t.__init__(self, 'morse',
['bindings/util', 'bindings/base', 'bindings/geometric', 'bindings/control'],
replacement)
def filter_declarations(self):
stype = 'Morse'
# create a Python type for the corresponding state type
state = self.ompl_ns.class_('ScopedState< ompl::base::%sStateSpace >' % stype)
state.rename(stype+'State')
state.operator('=', arg_types=['::ompl::base::State const &']).exclude()
# add a constructor that allows a MorseState to be constructed from a State
state.add_registration_code(
'def(bp::init<ompl::base::ScopedState<ompl::base::StateSpace> const &>(( bp::arg("other") )))')
# add a constructor that allows, e.g., a State to be constructed from a MorseState
bstate = self.ompl_ns.class_('ScopedState< ompl::base::StateSpace >')
bstate.add_registration_code(
'def(bp::init<ompl::base::ScopedState<ompl::base::%sStateSpace> const &>(( bp::arg("other") )))' % stype)
# add array access to double components of state
self.add_array_access(state,'double')
if __name__ == '__main__':
setrecursionlimit(50000)
if len(argv)==1:
print("Usage: generatebindings.py <modulename>")
else:
for module in argv[1:]:
try:
globals()['ompl_'+module+'_generator_t']()
except KeyError:
print("Error: can't generate code for module %s" % module)
|
|
from ply import lex
from ply import yacc
from DNAData import DNAData
from DNAGroup import DNAGroup
from DNAVisGroup import DNAVisGroup
from DNAProp import DNAProp
from DNASign import DNASign
from DNASignBaseline import DNASignBaseline
from DNASignText import DNASignText
from DNASignGraphic import DNASignGraphic
from DNANode import DNANode
from DNALandmarkBuilding import DNALandmarkBuilding
from DNADoor import DNADoor
from DNAFlatDoor import DNAFlatDoor
from DNAFlatBuilding import DNAFlatBuilding
from DNAWall import DNAWall
from DNAStreet import DNAStreet
from DNAWindows import DNAWindows
from DNACornice import DNACornice
# Classes referenced by the grammar rules below; the module names assume the
# same one-class-per-module layout as the imports above.
from DNASuitPoint import DNASuitPoint
from DNAAnimProp import DNAAnimProp
from DNAInteractiveProp import DNAInteractiveProp
from DNAAnimBuilding import DNAAnimBuilding
from DNABattleCell import DNABattleCell
tokens = [
'FLOAT',
'INTEGER',
'UNQUOTED_STRING',
'QUOTED_STRING'
]
reserved = {
'store_suit_point' : 'STORE_SUIT_POINT',
'group' : 'GROUP',
'visgroup' : 'VISGROUP',
'vis' : 'VIS',
'STREET_POINT' : 'STREET_POINT',
'FRONT_DOOR_POINT' : 'FRONT_DOOR_POINT',
'SIDE_DOOR_POINT' : 'SIDE_DOOR_POINT',
'COGHQ_IN_POINT' : 'COGHQ_IN_POINT',
'COGHQ_OUT_POINT' : 'COGHQ_OUT_POINT',
'suit_edge' : 'SUIT_EDGE',
'battle_cell' : 'BATTLE_CELL',
'prop' : 'PROP',
'pos' : 'POS',
'hpr' : 'HPR',
'scale' : 'SCALE',
'code' : 'CODE',
'color' : 'COLOR',
'model' : 'MODEL',
'store_node' : 'STORE_NODE',
'sign' : 'SIGN',
'baseline' : 'BASELINE',
'width' : 'WIDTH',
'height' : 'HEIGHT',
'stomp' : 'STOMP',
'stumble' : 'STUMBLE',
'indent' : 'INDENT',
'wiggle' : 'WIGGLE',
'kern' : 'KERN',
'text' : 'TEXT',
'letters' : 'LETTERS',
'store_font' : 'STORE_FONT',
'flat_building' : 'FLAT_BUILDING',
'wall' : 'WALL',
'windows' : 'WINDOWS',
'count' : 'COUNT',
'cornice' : 'CORNICE',
'landmark_building' : 'LANDMARK_BUILDING',
'title' : 'TITLE',
'article' : 'ARTICLE',
'building_type' : 'BUILDING_TYPE',
'door' : 'DOOR',
'store_texture' : 'STORE_TEXTURE',
'street' : 'STREET',
'texture' : 'TEXTURE',
'graphic' : 'GRAPHIC',
'hood_model' : 'HOODMODEL',
'place_model' : 'PLACEMODEL',
'nhpr' : 'NHPR',
'flags' : 'FLAGS',
'node' : 'NODE',
'flat_door' : 'FLAT_DOOR',
'anim' : 'ANIM',
'cell_id' : 'CELL_ID',
'anim_prop' : 'ANIM_PROP',
'interactive_prop' : 'INTERACTIVE_PROP',
'anim_building' : 'ANIM_BUILDING',
}
tokens += reserved.values()
t_ignore = ' \t'
literals = '[],'
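# PLY derives each token's regular expression from the rule function's
# docstring; the rules below assign __doc__ after the def so the pattern
# sits next to its action instead of being written as a literal docstring.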
def t_ignore_COMMENT(t):
pass
t_ignore_COMMENT.__doc__ = r'[/]{2,2}.*'
def t_ignore_ML_COMMENT(t):
pass
t_ignore_ML_COMMENT.__doc__ = r'\/\*([^*]|[\r\n])*\*/'
def t_QUOTED_STRING(t):
t.value = t.value[1:-1]
return t
t_QUOTED_STRING.__doc__ = r'["][^"]*["]'
def t_FLOAT(t):
t.value = float(t.value)
return t
t_FLOAT.__doc__ = r'[+-]?\d+[.]\d*([e][+-]\d+)?'
def t_INTEGER(t):
t.value = int(t.value)
return t
t_INTEGER.__doc__ = r'[+-]?\d+'
def t_UNQUOTED_STRING(t):
if t.value in reserved:
t.type = reserved[t.value]
return t
t_UNQUOTED_STRING.__doc__ = r'[^ \t\n\r\[\],"]+'
def t_newline(t):
t.lexer.lineno += len(t.value)
t_newline.__doc__ = r'\n+'
def t_error(t):
print 'Illegal character %s' % t.value[0]
t.lexer.skip(1)
# Build the lex
lex.lex()
# dictionary of names
names = { }
def p_dna(p):
pass
p_dna.__doc__ = \
'''dna : dna object
| object'''
def p_object(p):
p[0] = p[1]
p_object.__doc__ = \
'''object : suitpoint
| group
| model
| font
| store_texture'''
def p_number(p):
p[0] = p[1]
p_number.__doc__ = \
'''number : FLOAT
| INTEGER'''
def p_lpoint3f(p):
from panda3d.core import LVector3f
p[0] = LVector3f(p[1], p[2], p[3])
p_lpoint3f.__doc__ = '''lpoint3f : number number number'''
def p_suitpoint(p):
p.parser.dnaStore.storeSuitPoint(DNASuitPoint(p[3], p[5], p[7]))
p_suitpoint.__doc__ = \
'''suitpoint : STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "]"
| STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "," number "]"''' # the trailing number is the landmark building index
def p_suitpointtype(p):
p[0] = DNASuitPoint.pointTypeMap[p[1]]
p_suitpointtype.__doc__ = \
'''suitpointtype : STREET_POINT
| FRONT_DOOR_POINT
| SIDE_DOOR_POINT
| COGHQ_IN_POINT
| COGHQ_OUT_POINT'''
def p_string(p):
p[0] = p[1]
p_string.__doc__ = \
'''string : QUOTED_STRING
| UNQUOTED_STRING'''
def p_dnagroupdef(p):
p[0] = DNAGroup(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_dnagroupdef.__doc__ = '''dnagroupdef : GROUP string'''
def p_dnanodedef(p):
p[0] = DNANode(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_dnanodedef.__doc__ = '''dnanodedef : NODE string'''
def p_visgroupdef(p):
p[0] = DNAVisGroup(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_visgroupdef.__doc__ = '''visgroupdef : VISGROUP string'''
def p_dnagroup(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_dnagroup.__doc__ = '''dnagroup : dnagroupdef "[" subgroup_list "]"'''
def p_visgroup(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_visgroup.__doc__ = '''visgroup : visgroupdef "[" subvisgroup_list "]"'''
def p_string_opt_list(p):
if len(p) == 2:
p[0] = []
elif p[2] is not None:
p[0] = p[1]
p[0] += [p[2]]
else:
p[0] = p[1]
p_string_opt_list.__doc__ = \
'''string_opt_list : string_opt_list string
| empty'''
def p_vis(p):
p.parser.parentGroup.addVisible(p[3])
for vis in p[4]:
p.parser.parentGroup.addVisible(vis)
p_vis.__doc__ = '''vis : VIS "[" string string_opt_list "]"'''
def p_empty(p):
pass
p_empty.__doc__ = \
'''empty : '''
def p_group(p):
p[0] = p[1]
p_group.__doc__ = \
'''group : dnagroup
| visgroup
| dnanode
| windows
| cornice
| door'''
def p_dnanode(p):
p[0] = p[1]
p_dnanode.__doc__ = \
'''dnanode : prop
| sign
| signbaseline
| signtext
| flatbuilding
| wall
| landmarkbuilding
| street
| signgraphic
| dnanode_grp'''
def p_dnanode_grp(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_dnanode_grp.__doc__ = '''dnanode_grp : dnanodedef "[" subdnanode_list "]"'''
def p_sign(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_sign.__doc__ = '''sign : signdef "[" subprop_list "]"'''
def p_signgraphic(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_signgraphic.__doc__ = '''signgraphic : signgraphicdef "[" subsigngraphic_list "]"'''
def p_prop(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_prop.__doc__ = \
'''prop : propdef "[" subprop_list "]"
| animpropdef "[" subanimprop_list "]"
| interactivepropdef "[" subinteractiveprop_list "]"'''
def p_signbaseline(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_signbaseline.__doc__ = '''signbaseline : baselinedef "[" subbaseline_list "]"'''
def p_signtext(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_signtext.__doc__ = '''signtext : signtextdef "[" subtext_list "]"'''
def p_flatbuilding(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_flatbuilding.__doc__ = '''flatbuilding : flatbuildingdef "[" subflatbuilding_list "]"'''
def p_wall(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_wall.__doc__ = '''wall : walldef "[" subwall_list "]"'''
def p_windows(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_windows.__doc__ = '''windows : windowsdef "[" subwindows_list "]"'''
def p_cornice(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_cornice.__doc__ = '''cornice : cornicedef "[" subcornice_list "]"'''
def p_landmarkbuilding(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_landmarkbuilding.__doc__ = '''landmarkbuilding : landmarkbuildingdef "[" sublandmarkbuilding_list "]"
| animbuildingdef "[" subanimbuilding_list "]"'''
def p_street(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_street.__doc__ = '''street : streetdef "[" substreet_list "]"'''
def p_door(p):
p[0] = p[1]
p.parser.parentGroup = p[0].getParent()
p_door.__doc__ = \
'''door : doordef "[" subdoor_list "]"
| flatdoordef "[" subdoor_list "]"'''
def p_propdef(p):
p[0] = DNAProp(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_propdef.__doc__ = '''propdef : PROP string'''
def p_animpropdef(p):
p[0] = DNAAnimProp(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_animpropdef.__doc__ = '''animpropdef : ANIM_PROP string'''
def p_interactivepropdef(p):
p[0] = DNAInteractiveProp(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_interactivepropdef.__doc__ = '''interactivepropdef : INTERACTIVE_PROP string'''
def p_flatbuildingdef(p):
p[0] = DNAFlatBuilding(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_flatbuildingdef.__doc__ = '''flatbuildingdef : FLAT_BUILDING string'''
def p_walldef(p):
p[0] = DNAWall('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_walldef.__doc__ = '''walldef : WALL'''
def p_windowsdef(p):
p[0] = DNAWindows('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_windowsdef.__doc__ = '''windowsdef : WINDOWS'''
def p_cornicedef(p):
p[0] = DNACornice('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_cornicedef.__doc__ = '''cornicedef : CORNICE'''
def p_landmarkbuildingdef(p):
p[0] = DNALandmarkBuilding(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_landmarkbuildingdef.__doc__ = '''landmarkbuildingdef : LANDMARK_BUILDING string'''
def p_animbuildingdef(p):
p[0] = DNAAnimBuilding(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_animbuildingdef.__doc__ = '''animbuildingdef : ANIM_BUILDING string'''
def p_doordef(p):
p[0] = DNADoor('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_doordef.__doc__ = '''doordef : DOOR'''
def p_flatdoordef(p):
p[0] = DNAFlatDoor('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_flatdoordef.__doc__ = '''flatdoordef : FLAT_DOOR'''
def p_streetdef(p):
p[0] = DNAStreet(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_streetdef.__doc__ = '''streetdef : STREET string'''
def p_signdef(p):
p[0] = DNASign()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signdef.__doc__ = '''signdef : SIGN'''
def p_signgraphicdef(p):
p[0] = DNASignGraphic('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signgraphicdef.__doc__ = '''signgraphicdef : GRAPHIC'''
def p_baselinedef(p):
p[0] = DNASignBaseline()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_baselinedef.__doc__ = '''baselinedef : BASELINE'''
def p_signtextdef(p):
p[0] = DNASignText()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signtextdef.__doc__ = '''signtextdef : TEXT'''
def p_suitedge(p):
zoneId = p.parser.parentGroup.getName()
p.parser.dnaStore.storeSuitEdge(p[2], p[3], zoneId)
p_suitedge.__doc__ = '''suitedge : SUIT_EDGE "[" number number "]"'''
def p_battlecell(p):
p[0] = DNABattleCell(p[3], p[4], p[5])
p.parser.dnaStore.storeBattleCell(p[0])
p.parser.parentGroup.addBattleCell(p[0])
p_battlecell.__doc__ = '''battlecell : BATTLE_CELL "[" number number lpoint3f "]"'''
def p_subgroup_list(p):
p[0] = p[1]
if len(p) == 3:
p[0] += [p[2]]
else:
p[0] = []
p_subgroup_list.__doc__ = \
'''subgroup_list : subgroup_list group
| empty'''
def p_subvisgroup_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subvisgroup_list.__doc__ = \
'''subvisgroup_list : subvisgroup_list group
| subvisgroup_list suitedge
| subvisgroup_list battlecell
| subvisgroup_list vis
| empty'''
def p_pos(p):
p.parser.parentGroup.setPos(p[3])
p_pos.__doc__ = '''pos : POS "[" lpoint3f "]"'''
def p_hpr(p):
p.parser.parentGroup.setHpr(p[3])
p_hpr.__doc__ = \
'''hpr : HPR "[" lpoint3f "]"
| NHPR "[" lpoint3f "]"'''
def p_scale(p):
p.parser.parentGroup.setScale(p[3])
p_scale.__doc__ = '''scale : SCALE "[" lpoint3f "]"'''
def p_flags(p):
p.parser.parentGroup.setFlags(p[3])
p_flags.__doc__ = '''flags : FLAGS "[" string "]"'''
def p_dnanode_sub(p):
p[0] = p[1]
p_dnanode_sub.__doc__ = \
'''dnanode_sub : group
| pos
| hpr
| scale'''
def p_dnaprop_sub(p):
p[0] = p[1]
p_dnaprop_sub.__doc__ = \
'''dnaprop_sub : code
| color'''
def p_dnaanimprop_sub(p):
p[0] = p[1]
p_dnaanimprop_sub.__doc__ = '''dnaanimprop_sub : anim'''
def p_dnainteractiveprop_sub(p):
p[0] = p[1]
p_dnainteractiveprop_sub.__doc__ = '''dnainteractiveprop_sub : cell_id'''
def p_anim(p):
p.parser.parentGroup.setAnim(p[3])
p_anim.__doc__ = '''anim : ANIM "[" string "]"'''
def p_cell_id(p):
p.parser.parentGroup.setCellId(p[3])
p_cell_id.__doc__ = '''cell_id : CELL_ID "[" number "]"'''
def p_baseline_sub(p):
p[0] = p[1]
p_baseline_sub.__doc__ = \
'''baseline_sub : code
| color
| width
| height
| indent
| kern
| stomp
| stumble
| wiggle
| flags'''
def p_text_sub(p):
p[0] = p[1]
p_text_sub.__doc__ = '''text_sub : letters'''
def p_signgraphic_sub(p):
p[0] = p[1]
p_signgraphic_sub.__doc__ = \
'''signgraphic_sub : width
| height
| code
| color'''
def p_flatbuilding_sub(p):
p[0] = p[1]
p_flatbuilding_sub.__doc__ = '''flatbuilding_sub : width'''
def p_wall_sub(p):
p[0] = p[1]
p_wall_sub.__doc__ = \
'''wall_sub : height
| code
| color'''
def p_windows_sub(p):
p[0] = p[1]
p_windows_sub.__doc__ = \
'''windows_sub : code
| color
| windowcount'''
def p_cornice_sub(p):
p[0] = p[1]
p_cornice_sub.__doc__ = \
'''cornice_sub : code
| color'''
def p_landmarkbuilding_sub(p):
p[0] = p[1]
p_landmarkbuilding_sub.__doc__ = \
'''landmarkbuilding_sub : code
| title
| article
| building_type
| wall_color'''
def p_animbuilding_sub(p):
p[0] = p[1]
p_animbuilding_sub.__doc__ = \
'''animbuilding_sub : anim'''
def p_door_sub(p):
p[0] = p[1]
p_door_sub.__doc__ = \
'''door_sub : code
| color'''
def p_street_sub(p):
p[0] = p[1]
p_street_sub.__doc__ = \
'''street_sub : code
| texture
| color'''
def p_texture(p):
p.parser.parentGroup.setTexture(p[3])
p_texture.__doc__ = '''texture : TEXTURE "[" string "]"'''
def p_title(p):
p.parser.parentGroup.setTitle(p[3])
p_title.__doc__ = '''title : TITLE "[" string "]"'''
def p_article(p):
p.parser.parentGroup.setArticle(p[3])
p_article.__doc__ = '''article : ARTICLE "[" string "]"'''
def p_building_type(p):
p.parser.parentGroup.setBuildingType(p[3])
p_building_type.__doc__ = '''building_type : BUILDING_TYPE "[" string "]"'''
def p_wall_color(p):
p.parser.parentGroup.setWallColor((p[3],p[4],p[5],p[6]))
p_wall_color.__doc__ = '''wall_color : COLOR "[" number number number number "]"'''
def p_count(p):
p.parser.parentGroup.setWindowCount(p[3])
p_count.__doc__ = '''windowcount : COUNT "[" number "]"'''
def p_letters(p):
p.parser.parentGroup.setLetters(p[3])
p_letters.__doc__ = '''letters : LETTERS "[" string "]"'''
def p_width(p):
p.parser.parentGroup.setWidth(p[3])
p_width.__doc__ = '''width : WIDTH "[" number "]"'''
def p_height(p):
p.parser.parentGroup.setHeight(p[3])
p_height.__doc__ = '''height : HEIGHT "[" number "]"'''
def p_stomp(p):
p.parser.parentGroup.setStomp(p[3])
p_stomp.__doc__ = '''stomp : STOMP "[" number "]"'''
def p_indent(p):
p.parser.parentGroup.setIndent(p[3])
p_indent.__doc__ = '''indent : INDENT "[" number "]"'''
def p_kern(p):
p.parser.parentGroup.setKern(p[3])
p_kern.__doc__ = '''kern : KERN "[" number "]"'''
def p_stumble(p):
p.parser.parentGroup.setStumble(p[3])
p_stumble.__doc__ = '''stumble : STUMBLE "[" number "]"'''
def p_wiggle(p):
p.parser.parentGroup.setWiggle(p[3])
p_wiggle.__doc__ = '''wiggle : WIGGLE "[" number "]"'''
def p_code(p):
p.parser.parentGroup.setCode(p[3])
p_code.__doc__ = '''code : CODE "[" string "]"'''
def p_color(p):
p.parser.parentGroup.setColor((p[3],p[4],p[5],p[6]))
p_color.__doc__ = '''color : COLOR "[" number number number number "]"'''
def p_subprop_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subprop_list.__doc__ = \
'''subprop_list : subprop_list dnanode_sub
| subprop_list dnaprop_sub
| empty'''
def p_subanimprop_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subanimprop_list.__doc__ = \
'''subanimprop_list : subanimprop_list dnanode_sub
| subanimprop_list dnaprop_sub
| subanimprop_list dnaanimprop_sub
| empty'''
def p_subinteractiveprop_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subinteractiveprop_list.__doc__ = \
'''subinteractiveprop_list : subinteractiveprop_list dnanode_sub
| subinteractiveprop_list dnaprop_sub
| subinteractiveprop_list dnaanimprop_sub
| subinteractiveprop_list dnainteractiveprop_sub
| empty'''
def p_subbaseline_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subbaseline_list.__doc__ = \
'''subbaseline_list : subbaseline_list dnanode_sub
| subbaseline_list baseline_sub
| empty'''
def p_subtext_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subtext_list.__doc__ = \
'''subtext_list : subtext_list dnanode_sub
| subtext_list text_sub
| empty'''
def p_subdnanode_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subdnanode_list.__doc__ = \
'''subdnanode_list : subdnanode_list dnanode_sub
| empty'''
def p_subsigngraphic_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subsigngraphic_list.__doc__ = \
'''subsigngraphic_list : subsigngraphic_list dnanode_sub
| subsigngraphic_list signgraphic_sub
| empty'''
def p_subflatbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subflatbuilding_list.__doc__ = \
'''subflatbuilding_list : subflatbuilding_list dnanode_sub
| subflatbuilding_list flatbuilding_sub
| empty'''
def p_subwall_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subwall_list.__doc__ = \
'''subwall_list : subwall_list dnanode_sub
| subwall_list wall_sub
| empty'''
def p_subwindows_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subwindows_list.__doc__ = \
'''subwindows_list : subwindows_list dnanode_sub
| subwindows_list windows_sub
| empty'''
def p_subcornice_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subcornice_list.__doc__ = \
'''subcornice_list : subcornice_list dnanode_sub
| subcornice_list cornice_sub
| empty'''
def p_sublandmarkbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_sublandmarkbuilding_list.__doc__ = \
'''sublandmarkbuilding_list : sublandmarkbuilding_list dnanode_sub
| sublandmarkbuilding_list landmarkbuilding_sub
| empty'''
def p_subanimbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subanimbuilding_list.__doc__ = \
'''subanimbuilding_list : subanimbuilding_list dnanode_sub
| subanimbuilding_list landmarkbuilding_sub
| subanimbuilding_list animbuilding_sub
| empty'''
def p_subdoor_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_subdoor_list.__doc__ = \
'''subdoor_list : subdoor_list dnanode_sub
| subdoor_list door_sub
| empty'''
def p_substreet_list(p):
p[0] = p[1]
if len(p) == 3:
if isinstance(p[2], DNAGroup):
p[0] += [p[2]]
else:
p[0] = []
p_substreet_list.__doc__ = \
'''substreet_list : substreet_list dnanode_sub
| substreet_list street_sub
| empty'''
def p_modeldef(p): #TODO
p.parser.modelType = p[1]
#filename = Filename(p[2])
#filename.setExtension('bam')
#p.parser.nodePath = base.loader.loadModel(filename)
#p.parser.modelType = p[1]
p_modeldef.__doc__ = \
'''modeldef : MODEL string
| HOODMODEL string
| PLACEMODEL string'''
def p_model(p):
pass
p_model.__doc__ = '''model : modeldef "[" modelnode_list "]"'''
def p_modelnode_list(p):
pass
p_modelnode_list.__doc__ = \
'''modelnode_list : modelnode_list node
| empty'''
def p_node(p): #TODO!
nodePath = None
search = ''
code = ''
root = ''
if len(p) == 6:
search = p[4]
code = search
root = p[3]
else:
search = p[5]
code = p[4]
root = p[3]
p.parser.dnaStore.storeCatalogCode(root, code)
#if search != '':
# nodePath = p.parser.nodePath.find('**/' + search)
#else:
nodePath = p.parser.nodePath
#nodePath.setTag('DNACode', p[4])
if p.parser.modelType == 'hood_model':
p.parser.dnaStore.storeHoodNode(nodePath, code)
elif p.parser.modelType == 'place_model':
p.parser.dnaStore.storePlaceNode(nodePath, code)
else:
p.parser.dnaStore.storeNode(nodePath, code)
p_node.__doc__ = \
'''node : STORE_NODE "[" string string "]"
| STORE_NODE "[" string string string "]"'''
def p_store_texture(p):
if len(p) == 7:
name = p[4]
filename = p[5]
else:
name = p[3]
filename = p[4]
p.parser.dnaStore.storeCatalogCode(p[3], name)
#texture = TexturePool.loadTexture(Filename(filename))
p.parser.dnaStore.storeTexture(name, filename)
p_store_texture.__doc__ = \
'''store_texture : STORE_TEXTURE "[" string string "]"
| STORE_TEXTURE "[" string string string "]"'''
def p_font(p):
p.parser.dnaStore.storeFont(p[4].capitalize())
#filename = Filename(p[5])
#filename.setExtension('bam')
#p.parser.dnaStore.storeFont(FontPool.loadFont(filename.cStr()), p[4])
p_font.__doc__ = \
'''font : STORE_FONT "[" string string string "]"'''
def p_error(p):
if p is None:
print 'Syntax error: unexpected end of file'
else:
print "Syntax error at '%s'" % p.value
|
|
from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
import new
import re
import _base
from html5lib import constants, ihatexml
from html5lib.constants import namespaces
moduleCache = {}
def getDomModule(DomImplementation):
name = "_" + DomImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = new.module(name)
objs = getDomBuilder(DomImplementation)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
def getDomBuilder(DomImplementation):
Dom = DomImplementation
infoset_filter = ihatexml.InfosetFilter()
class AttrList:
def __init__(self, element):
self.element = element
def __iter__(self):
return self.element.attributes.items().__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(infoset_filter.coerceAttribute(name),
infoset_filter.coerceCharacters(value))
def items(self):
return [(infoset_filter.fromXmlName(item[0]), item[1]) for item in
self.element.attributes.items()]
def keys(self):
return [infoset_filter.fromXmlName(item) for item in
self.element.attributes.keys()]
def __getitem__(self, name):
name = infoset_filter.toXmlName(name)
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(infoset_filter.toXmlName(name))
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.localName)
self.element = element
namespace = property(lambda self:hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
data=infoset_filter.coerceCharacters(data)
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in attributes.items():
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" +
infoset_filter.coerceAttribute(
name[1]))
else:
qualifiedName = infoset_filter.coerceAttribute(
name[1])
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
infoset_filter.coerceAttribute(name), value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None,None,None)
return self
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = infoset_filter.coerceCharacters(data)
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append( """|%s<!DOCTYPE %s "%s" "%s">"""%(
' '*indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.name))
else:
rv.append("|%s<!DOCTYPE >"%(' '*indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->"%(' '*indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" %(' '*indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI not in (None,
constants.namespaces["html"])):
name = "%s %s"%(constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>"%(' '*indent, name))
if element.hasAttributes():
i = 0
attr = element.attributes.item(i)
while attr:
name = infoset_filter.fromXmlName(attr.localName)
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s"%(constants.prefixes[ns], name)
i += 1
attr = element.attributes.item(i)
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
def dom2sax(node, handler, nsmap={'xml':XML_NAMESPACE}):
if node.nodeType == Node.ELEMENT_NODE:
if not nsmap:
handler.startElement(node.nodeName, node.attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElement(node.nodeName)
else:
attributes = dict(node.attributes.itemsNS())
# gather namespace declarations
prefixes = []
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if (attr.namespaceURI == XMLNS_NAMESPACE or
(attr.namespaceURI is None and attr.nodeName.startswith('xmlns'))):
prefix = (attr.localName != 'xmlns' and attr.localName or None)
handler.startPrefixMapping(prefix, attr.nodeValue)
prefixes.append(prefix)
nsmap = nsmap.copy()
nsmap[prefix] = attr.nodeValue
del attributes[(attr.namespaceURI, attr.localName)]
# apply namespace declarations
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if attr.namespaceURI is None and ':' in attr.nodeName:
prefix = attr.nodeName.split(':')[0]
if prefix in nsmap:
del attributes[(attr.namespaceURI, attr.localName)]
attributes[(nsmap[prefix],attr.localName)]=attr.nodeValue
# SAX events
ns = node.namespaceURI or nsmap.get(None,None)
handler.startElementNS((ns,node.nodeName), node.nodeName, attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElementNS((ns, node.nodeName), node.nodeName)
for prefix in prefixes: handler.endPrefixMapping(prefix)
elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
handler.characters(node.nodeValue)
elif node.nodeType == Node.DOCUMENT_NODE:
handler.startDocument()
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endDocument()
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
for child in node.childNodes: dom2sax(child, handler, nsmap)
else:
# ATTRIBUTE_NODE
# ENTITY_NODE
# PROCESSING_INSTRUCTION_NODE
# COMMENT_NODE
# DOCUMENT_TYPE_NODE
# NOTATION_NODE
pass
return locals()
# Keep backwards compatibility with things that directly load
# classes/functions from this module
for key, value in getDomModule(minidom).__dict__.items():
globals()[key] = value
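# Usage sketch (illustrative): this module is normally selected through
# html5lib's treebuilder machinery, roughly:
#   import html5lib
#   builder = getDomModule(minidom).TreeBuilder
#   doc = html5lib.HTMLParser(tree=builder).parse("<p>hello</p>")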
|
|
"""
Solvers for two-stage robust optimization problems
References:
[1]Solving two-stage robust optimization problems using a column-and-constraint generation method
The standard format is equation (1)
It should be noted that, the second stage problem is solved using dual problems
"""
from numpy import array, concatenate, ones, zeros, eye, hstack
from solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as lp
class TwoStageRobustOptimization():
"""
column-and-constraint generation method for two-stage robust optimization,
one kind of primal cutting-plane method
"""
def __init__(self):
self.name = " Robust optimization"
def main(self, c, Aeq=None, beq=None, A=None, b=None, lb=None, ub=None, vtypes=None, d=None, G=None,
E=None, M=None, h=None, u_mean=None, u_delta=None, budget=None):
"""
:param c: First-stage objective coefficients
:param Aeq: Equality constraint matrix for the first-stage decisions
:param beq: Equality constraint right-hand side for the first-stage decisions
:param A: Inequality constraint matrix for the first-stage decisions
:param b: Inequality constraint right-hand side for the first-stage decisions
:param lb: Lower bounds of the first-stage variables
:param ub: Upper bounds of the first-stage variables
:param vtypes: Variable types of the first-stage variables
:param d: Objective coefficients of the second-stage decisions
:param G: Inequality constraint matrix on the second-stage variables
:param E: Inequality constraint matrix on the first-stage variables
:param M: Inequality constraint matrix on the uncertain variables
:param h: Inequality constraint constants of the second-stage problem
:param u_mean: Mean values of the uncertain variables
:param u_delta: Deviation bounds of the uncertain variables
:param budget: Budget constraint on the uncertain variables
:return: The first-stage decision y
"""
if Aeq is not None:
neq = Aeq.shape[0]
else:
neq = 0
if A is not None:
nineq = A.shape[0]
else:
nineq = 0
bigM = 10 ** 8
# Modify the first stage decision variable
ny = c.shape[0]
c_first_stage = concatenate([c, array([[1]])], axis=0)  # append the epigraph variable eta
lb_first_stage = concatenate([lb, array([[-bigM]])], axis=0)
ub_first_stage = concatenate([ub, array([[bigM]])], axis=0)
Aeq_first_stage = hstack([Aeq, zeros((neq, 1))])
beq_first_stage = beq
A_first_stage = hstack([A, zeros((nineq, 1))])
b_first_stage = b
vtypes.append("c")
# Solve the first stage optimization problem
(yy, obj_first_stage, success_first_stage) = lp(c_first_stage,
Aeq=Aeq_first_stage,
beq=beq_first_stage,
A=A_first_stage,
b=b_first_stage,
xmin=lb_first_stage,
xmax=ub_first_stage,
vtypes=vtypes, objsense="min")
# Modify the second stage decision making problems
# U = u_mean - u_delta + 2*I*u_delta
y = array(yy[0:ny]).reshape((ny, 1))
nx = d.shape[0]
nu = u_mean.shape[0]
nlam = G.shape[0]  # The number of dual variables of the second-stage problem
lb_second_stage = zeros((nlam + nu, 1))
lb_second_stage = concatenate([lb_second_stage, -bigM * ones((nu, 1))], axis=0)
ub_second_stage = concatenate([bigM * ones((nlam, 1)), ones((nu, 1)), bigM * ones((nu, 1))], axis=0)
vtypes_second_stage = ["c"] * nlam + ["b"] * nu + ["c"] * nu
c_second_stage = zeros((nlam + nu + nu, 1))
c_second_stage[0:nlam] = h - E.dot(y) - M.dot(u_mean - u_delta)
c_second_stage[nlam + nu:] = -2 * u_delta
# The constraint set
# Equal constraints
Aeq_second_stage = zeros((nx, nlam + nu + nu))
beq_second_stage = d
Aeq_second_stage[:, 0:nlam] = G.transpose()
# The McCormick envelopes
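# The dual objective contains bilinear terms between the binary indicators
# Iu and the continuous duals lam; each product is replaced by an auxiliary
# continuous variable z (the last nu columns) constrained by the four big-M
# inequalities 1)-4) below: z >= -bigM*Iu, z >= M'lam - bigM*(1 - Iu),
# z <= M'lam + bigM*(1 - Iu), and z <= bigM*Iu. This McCormick-style
# linearization is exact because one factor is binary.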
# 1)
A_second_stage = zeros((nu, nlam + nu + nu))
b_second_stage = zeros((nu, 1))
A_second_stage[:, nlam:nlam + nu] = -bigM * eye(nu)
A_second_stage[:, nlam + nu:] = -eye(nu)
# 2)
A_second_stage_temp = zeros((nu, nlam + nu + nu))
b_second_stage_temp = bigM * ones((nu, 1))
A_second_stage_temp[:, 0:nlam] = M.transpose()
A_second_stage_temp[:, nlam:nlam + nu] = bigM * eye(nu)
A_second_stage_temp[:, nlam + nu:] = -eye(nu)
A_second_stage = concatenate([A_second_stage, A_second_stage_temp], axis=0)
b_second_stage = concatenate([b_second_stage, b_second_stage_temp], axis=0)
# 3)
A_second_stage_temp = zeros((nu, nlam + nu + nu))
b_second_stage_temp = bigM * ones((nu, 1))
A_second_stage_temp[:, 0:nlam] = -M.transpose()
A_second_stage_temp[:, nlam:nlam + nu] = bigM * eye(nu)
A_second_stage_temp[:, nlam + nu:] = eye(nu)
A_second_stage = concatenate([A_second_stage, A_second_stage_temp], axis=0)
b_second_stage = concatenate([b_second_stage, b_second_stage_temp], axis=0)
# 4)
A_second_stage_temp = zeros((nu, nlam + nu + nu))
b_second_stage_temp = zeros((nu, 1))
A_second_stage_temp[:, nlam:nlam + nu] = -bigM * eye(nu)  # together with row 1), enforces |z| <= bigM * Iu
A_second_stage_temp[:, nlam + nu:] = eye(nu)
A_second_stage = concatenate([A_second_stage, A_second_stage_temp], axis=0)
b_second_stage = concatenate([b_second_stage, b_second_stage_temp], axis=0)
# 5) budget constraints
A_second_stage_temp = zeros((1, nlam + nu + nu))
A_second_stage_temp[0, nlam:nlam + nu] = ones((1, nu))
b_second_stage_temp = budget
A_second_stage = concatenate([A_second_stage, A_second_stage_temp], axis=0)
b_second_stage = concatenate([b_second_stage, b_second_stage_temp], axis=0)
(x, obj_second_stage, success_second_stage) = lp(c_second_stage,
Aeq=Aeq_second_stage,
beq=beq_second_stage,
A=A_second_stage,
b=b_second_stage,
xmin=lb_second_stage,
xmax=ub_second_stage,
vtypes=vtypes_second_stage,
objsense="max")
LB = obj_first_stage
UB = ((c_first_stage[0:ny].transpose()).dot(y) + obj_second_stage)[0][0]
Gap = abs((UB - LB) / LB)
k = 0
kmax = 1000
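# CCG main loop: re-solve the master (first-stage) problem for a lower
# bound, then the worst-case (second-stage) subproblem for an upper bound,
# and add primal variables and cuts for the identified scenario until the
# relative gap drops below 1%.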
while Gap > 10 ** -2:
# Solve the first stage problem
(yy, obj_first_stage, success_first_stage) = lp(c_first_stage,
Aeq=Aeq_first_stage,
beq=beq_first_stage,
A=A_first_stage,
b=b_first_stage,
xmin=lb_first_stage,
xmax=ub_first_stage, vtypes=vtypes, objsense="min")
y = array(yy[0:ny]).reshape((ny, 1))
# Update the second stage problem
c_second_stage[0:nlam] = h - E.dot(y) - M.dot(u_mean - u_delta)
# solve the second stage problem
(x, obj_second_stage, success_second_stage) = lp(c_second_stage,
Aeq=Aeq_second_stage,
beq=beq_second_stage,
A=A_second_stage,
b=b_second_stage,
xmin=lb_second_stage,
xmax=ub_second_stage,
vtypes=vtypes_second_stage,
objsense="max")
# Update gap
LB = obj_first_stage
UB = ((c_first_stage[0:ny].transpose()).dot(y) + obj_second_stage)[0][0]
Gap = abs((UB - LB) / LB)
print("The upper boundary is {0}".format(UB))
print("The lower boundary is {0}".format(LB))
print("The gap is {0}".format(Gap))
# Obtain cuts
x = array(x).reshape((len(x), 1))
Iu = x[nlam:nlam + nu]
u = u_mean - u_delta + 2 * Iu * u_delta  # elementwise product; Iu is the binary worst-case pattern
# Add cuts to the first stage optimization problem
nx_temp = c_first_stage.shape[0] # Previous decision variables
c_first_stage = concatenate([c_first_stage, zeros((nx, 1))], axis=0)  # append the new second-stage variable copies
lb_first_stage = concatenate([lb_first_stage, -bigM * ones((nx, 1))], axis=0)
ub_first_stage = concatenate([ub_first_stage, bigM * ones((nx, 1))], axis=0)
vtypes_temp = ["c"] * nx
vtypes += ["c"] * nx
neq = Aeq_first_stage.shape[0]
Aeq_first_stage = hstack([Aeq_first_stage, zeros((neq, nx))])
nineq = A_first_stage.shape[0]
A_first_stage = hstack([A_first_stage, zeros((nineq, nx))])
# Add primal cuts
A_first_stage_temp = zeros((h.shape[0], nx_temp + nx))
A_first_stage_temp[:, 0:ny] = -E
A_first_stage_temp[:, nx_temp:] = -G
b_first_stage_temp = M.dot(u) - h
A_first_stage = concatenate([A_first_stage, A_first_stage_temp])
b_first_stage = concatenate([b_first_stage, b_first_stage_temp])
A_first_stage_temp = zeros((1, nx_temp + nx))
b_first_stage_temp = zeros((1, 1))
A_first_stage_temp[0, nx_temp:] = d.transpose()
A_first_stage_temp[0, ny] = -1
A_first_stage = concatenate([A_first_stage, A_first_stage_temp])
b_first_stage = concatenate([b_first_stage, b_first_stage_temp])
k += 1
if k > kmax:
break
return y
if __name__ == "__main__":
robust_optimization = TwoStageRobustOptimization()
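# Illustrative call (shapes only; the problem data and the budget Gamma
# are placeholders, not part of this module):
# y = robust_optimization.main(c,                 # (ny, 1) first-stage cost
#                              Aeq=Aeq, beq=beq,  # (neq, ny), (neq, 1)
#                              A=A, b=b,          # (nineq, ny), (nineq, 1)
#                              lb=lb, ub=ub, vtypes=["b"] * ny,
#                              d=d,               # (nx, 1) second-stage cost
#                              G=G, E=E, M=M, h=h,  # (m, nx), (m, ny), (m, nu), (m, 1)
#                              u_mean=u_mean, u_delta=u_delta,  # (nu, 1) each
#                              budget=array([[Gamma]]))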
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = ["Decoder", "dynamic_decode"]
_transpose_batch_time = rnn._transpose_batch_time # pylint: disable=protected-access
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
"""An RNN Decoder abstract interface object.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that are passed as input to
the RNNCell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that are passed to the
RNNCell instance as the state.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
time step.
"""
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
This method must compute initial input values and initial state.
Args:
name: Name scope for any created operations.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
step.
state: RNNCell state (possibly nested tuple of) tensor[s] from previous
time step.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an object
containing the decoder output, `next_state` is a (structure of) state
tensors and TensorArrays, `next_inputs` is the tensor that should be used
as input for the next step, `finished` is a boolean tensor telling whether
the sequence is complete, for each sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
"""Called after decoding iterations complete.
Args:
outputs: RNNCell outputs (possibly nested tuple of) tensor[s] for all time
steps.
final_state: RNNCell final state (possibly nested tuple of) tensor[s] for
last time step.
sequence_lengths: 1-D `int32` tensor containing lengths of each sequence.
Returns:
`(final_outputs, final_state)`: `final_outputs` is an object containing
the final decoder output, `final_state` is a (structure of) state tensors
and TensorArrays.
"""
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
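# A concrete Decoder (sketch, not part of this module) wraps an RNNCell plus
# sampling and termination logic, e.g.:
#   class MyDecoder(Decoder):
#     def initialize(self, name=None):
#       return (initial_finished, first_inputs, cell_initial_state)
#     def step(self, time, inputs, state, name=None):
#       cell_outputs, next_state = self._cell(inputs, state)
#       ...
#       return (outputs, next_state, next_inputs, finished)
# tf.contrib.seq2seq.BasicDecoder follows this pattern.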
class BaseDecoder(layers.Layer):
"""An RNN Decoder that is based on a Keras layer.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that are passed as input to
the RNNCell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that are passed to the
RNNCell instance as the state.
- `memory`: (structure of) tensors that is usually the full output of the
encoder, which will be used for the attention wrapper for the RNNCell.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
time step.
"""
def __init__(self,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
**kwargs):
self.output_time_major = output_time_major
self.impute_finished = impute_finished
self.maximum_iterations = maximum_iterations
self.parallel_iterations = parallel_iterations
self.swap_memory = swap_memory
super(BaseDecoder, self).__init__(**kwargs)
def call(self, inputs, initial_state=None, **kwargs):
init_kwargs = kwargs
init_kwargs["initial_state"] = initial_state
return dynamic_decode(self,
output_time_major=self.output_time_major,
impute_finished=self.impute_finished,
maximum_iterations=self.maximum_iterations,
parallel_iterations=self.parallel_iterations,
swap_memory=self.swap_memory,
decoder_init_input=inputs,
decoder_init_kwargs=init_kwargs)
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
def initialize(self, inputs, initial_state=None, **kwargs):
"""Called before any decoding iterations.
This method must compute initial input values and initial state.
Args:
inputs: (structure of) tensors that contains the input for the decoder. In
the normal case, it's a tensor with shape [batch, timestep, embedding].
initial_state: (structure of) tensors that contains the initial state for
the RNNCell.
**kwargs: Other arguments that are passed in from the layer.call() method. It
may contain items like the input sequence_length, or masking for the input.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
def step(self, time, inputs, state):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
step.
state: RNNCell state (possibly nested tuple of) tensor[s] from previous
time step.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an object
containing the decoder output, `next_state` is a (structure of) state
tensors and TensorArrays, `next_inputs` is the tensor that should be used
as input for the next step, `finished` is a boolean tensor telling whether
the sequence is complete, for each sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
# TODO(scottzhu): Add build/get_config/from_config and other layer methods.
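# Illustrative sketch (not part of the original module): the minimal contract a
# concrete `BaseDecoder` subclass must satisfy. The cell, the float32 outputs
# and the fixed step budget below are assumptions for demonstration; real
# decoders emit structured outputs such as `BasicDecoderOutput`.
class _SketchDecoder(BaseDecoder):
  """Feeds each cell output back as the next input for a fixed step budget."""

  def __init__(self, cell, max_steps, **kwargs):
    self._cell = cell
    self._max_steps = max_steps
    self._batch_size = None
    super(_SketchDecoder, self).__init__(**kwargs)

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def output_dtype(self):
    return dtypes.float32

  def initialize(self, inputs, initial_state=None, **kwargs):
    # Nothing is finished before the first step.
    self._batch_size = array_ops.shape(inputs)[0]
    finished = array_ops.fill([self._batch_size], False)
    return finished, inputs, initial_state

  def step(self, time, inputs, state):
    outputs, next_state = self._cell(inputs, state)
    # Mark every sequence finished once the step budget is exhausted.
    finished = array_ops.fill(
        [self._batch_size], math_ops.greater_equal(time + 1, self._max_steps))
    return outputs, next_state, outputs, finished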
def _create_zero_outputs(size, dtype, batch_size):
"""Create a zero outputs Tensor structure."""
def _create(s, d):
return _zero_state_tensors(s, batch_size, d)
return nest.map_structure(_create, size, dtype)
def dynamic_decode(decoder,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
scope=None,
**kwargs):
"""Perform dynamic decoding with `decoder`.
Calls initialize() once and step() repeatedly on the Decoder object.
Args:
decoder: A `Decoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is faster).
Otherwise, outputs are returned as batch major tensors (this adds extra
time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: `int32` scalar, maximum allowed number of decoding
steps. Default is `None` (decode until the decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
scope: Optional variable scope to use.
    **kwargs: dict, other keyword arguments for dynamic_decode. It might
      contain initialization arguments for a `BaseDecoder`, which receives all
      of its tensor inputs during call().
Returns:
`(final_outputs, final_state, final_sequence_lengths)`.
Raises:
TypeError: if `decoder` is not an instance of `Decoder`.
ValueError: if `maximum_iterations` is provided but is not a scalar.
"""
  if not isinstance(decoder, (Decoder, BaseDecoder)):
    raise TypeError("Expected decoder to be type Decoder or BaseDecoder, "
                    "but saw: %s" % type(decoder))
with variable_scope.variable_scope(scope, "decoder") as varscope:
# Determine context types.
ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
is_xla = control_flow_util.GetContainingXLAContext(ctxt) is not None
in_while_loop = (
control_flow_util.GetContainingWhileContext(ctxt) is not None)
# Properly cache variable values inside the while_loop.
# Don't set a caching device when running in a loop, since it is possible
# that train steps could be wrapped in a tf.while_loop. In that scenario
# caching prevents forward computations in loop iterations from re-reading
# the updated weights.
if not context.executing_eagerly() and not in_while_loop:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.get_shape().ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
if isinstance(decoder, Decoder):
initial_finished, initial_inputs, initial_state = decoder.initialize()
else:
# For BaseDecoder that takes tensor inputs during call.
decoder_init_input = kwargs.pop("decoder_init_input", None)
decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
initial_finished, initial_inputs, initial_state = decoder.initialize(
decoder_init_input, **decoder_init_kwargs)
zero_outputs = _create_zero_outputs(decoder.output_size,
decoder.output_dtype,
decoder.batch_size)
if is_xla and maximum_iterations is None:
raise ValueError("maximum_iterations is required for XLA compilation.")
if maximum_iterations is not None:
initial_finished = math_ops.logical_or(
initial_finished, 0 >= maximum_iterations)
initial_sequence_lengths = array_ops.zeros_like(
initial_finished, dtype=dtypes.int32)
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if (not isinstance(from_shape, tensor_shape.TensorShape) or
from_shape.ndims == 0):
return None
else:
batch_size = tensor_util.constant_value(
ops.convert_to_tensor(
batch_size, name="batch_size"))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
dynamic_size = maximum_iterations is None or not is_xla
def _create_ta(s, d):
return tensor_array_ops.TensorArray(
dtype=d,
size=0 if dynamic_size else maximum_iterations,
dynamic_size=dynamic_size,
element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
finished, unused_sequence_lengths):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
decoder_state_sequence_lengths = False
if decoder.tracks_own_finished:
next_finished = decoder_finished
lengths = getattr(decoder_state, "lengths", None)
if lengths is not None:
# sequence lengths are provided by decoder_state.lengths; overwrite
# our sequence lengths.
decoder_state_sequence_lengths = True
sequence_lengths = math_ops.cast(lengths, dtypes.int32)
else:
next_finished = math_ops.logical_or(decoder_finished, finished)
if decoder_state_sequence_lengths:
# Just pass something through the loop; at the next iteration we'll pull
# the sequence lengths from the decoder_state again.
next_sequence_lengths = sequence_lengths
else:
next_sequence_lengths = array_ops.where(
math_ops.logical_not(finished),
array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
sequence_lengths)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)
res = control_flow_ops.while_loop(
condition,
body,
loop_vars=(
initial_time,
initial_outputs_ta,
initial_state,
initial_inputs,
initial_finished,
initial_sequence_lengths,
),
parallel_iterations=parallel_iterations,
maximum_iterations=maximum_iterations,
swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_sequence_lengths = res[5]
final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
try:
final_outputs, final_state = decoder.finalize(
final_outputs, final_state, final_sequence_lengths)
except NotImplementedError:
pass
if not output_time_major:
final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state, final_sequence_lengths
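# Illustrative sketch (not part of the original module): a typical direct call
# to `dynamic_decode`. The step limit is an assumption; with a `BaseDecoder`,
# `BaseDecoder.call` above forwards the same arguments automatically.
def _example_decode(decoder):
  """Decodes with finished-state imputation and a hard step limit."""
  return dynamic_decode(
      decoder,
      output_time_major=False,  # return [batch, time, ...] outputs
      impute_finished=True,     # zero outputs / copy state past finish
      maximum_iterations=100)   # hard cap; required under XLA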
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from anyjson import simplejson
from client import AccessTokenRefreshError
from client import AssertionCredentials
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import app_identity
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
pass
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for
the purpose of accessing data stored under an account assigned to the App
Engine application itself.
This credential does not require a flow to instantiate because it represents
  a two-legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being requested.
"""
    if isinstance(scope, list):
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
None,
None,
None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
    caching, we can skip all the storage hoops and just do a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
    except app_identity.Error as e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
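# Illustrative sketch (not part of the original module): two-legged usage of
# AppAssertionCredentials. No end-user authorization step is needed; the App
# Engine app identity signs the assertion itself. The scope URL below is an
# assumption for demonstration.
def _example_app_assertion_http():
  credentials = AppAssertionCredentials(
      scope='https://www.googleapis.com/auth/devstorage.read_only')
  # authorize() wraps the http object so every request carries the token.
  return credentials.authorize(httplib2.Http())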
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
  Utility property that allows easy storage and retrieval of an
oauth2client.Flow"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty,
self).get_value_for_datastore(model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
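# Illustrative sketch (not part of the original module): persisting a Flow on a
# datastore model, mirroring how CredentialsModel below stores Credentials.
class _ExampleFlowModel(db.Model):
  """Keyed by user_id; holds the pickled OAuth flow between request steps."""
  flow = FlowProperty()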
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
  oauth2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logging.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty,
self).get_value_for_datastore(model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logging.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logging.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
#if value is not None and not isinstance(value, Credentials):
# return None
return value
class StorageByKeyName(Storage):
"""Store and retrieve a single credential to and from
the App Engine datastore.
This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
on a datastore model class, and that entities
are stored by key_name.
"""
def __init__(self, model, key_name, property_name, cache=None):
"""Constructor for Storage.
Args:
model: db.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a CredentialsProperty
cache: memcache, a write-through cache to put in front of the datastore
"""
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
if self._cache:
json = self._cache.get(self._key_name)
if json:
return Credentials.new_from_json(json)
credential = None
entity = self._model.get_by_key_name(self._key_name)
if entity is not None:
credential = getattr(entity, self._property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
if self._cache:
self._cache.set(self._key_name, credential.to_json())
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
entity = self._model.get_by_key_name(self._key_name)
if entity is not None:
entity.delete()
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
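# Illustrative sketch (not part of the original module): reading and writing a
# user's credentials through StorageByKeyName, with memcache acting as a
# write-through cache in front of the datastore.
def _example_storage(user_id, credentials=None):
  storage = StorageByKeyName(
      CredentialsModel, user_id, 'credentials', cache=memcache)
  if credentials is not None:
    storage.put(credentials)
  return storage.get()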
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
Example:
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
def __init__(self, client_id, client_secret, scope,
auth_uri='https://accounts.google.com/o/oauth2/auth',
token_uri='https://accounts.google.com/o/oauth2/token',
user_agent=None,
message=None, **kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or list of strings, scope(s) of the credentials being
requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
user_agent: string, User agent of your application, default to None.
message: Message to display if there are problems with the OAuth 2.0
configuration. The message may contain HTML and will be presented on the
web interface for any method that uses the decorator.
      **kwargs: dict, Keyword arguments are passed along as kwargs to the
OAuth2WebServerFlow constructor.
"""
self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
auth_uri, token_uri, **kwargs)
self.credentials = None
self._request_handler = None
self._message = message
self._in_error = False
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(self._message)
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
    Starts the OAuth dance for the logged-in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = request_handler.request.url
self._request_handler = request_handler
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
return check_oauth
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self.flow.params['state'] = request_handler.request.url
self._request_handler = request_handler
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
method(request_handler, *args, **kwargs)
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
    Must only be called from within a webapp.RequestHandler subclassed method
    that has been decorated with either @oauth_required or @oauth_aware.
"""
callback = self._request_handler.request.relative_url('/oauth2callback')
url = self.flow.step1_get_authorize_url(callback)
user = users.get_current_user()
memcache.set(user.user_id(), pickle.dumps(self.flow),
namespace=OAUTH2CLIENT_NAMESPACE)
return str(url)
def http(self):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
"""
return self.credentials.authorize(httplib2.Http())
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
Example:
decorator = OAuth2DecoratorFromClientSecrets(
os.path.join(os.path.dirname(__file__), 'client_secrets.json')
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
def __init__(self, filename, scope, message=None):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
"""
try:
client_type, client_info = clientsecrets.loadfile(filename)
if client_type not in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
raise InvalidClientSecretsError('OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
      super(OAuth2DecoratorFromClientSecrets, self).__init__(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          # Bind by keyword so that `message` is not passed as `user_agent`.
          auth_uri=client_info['auth_uri'],
          token_uri=client_info['token_uri'],
          message=message)
except clientsecrets.InvalidClientSecretsError:
self._in_error = True
if message is not None:
self._message = message
else:
self._message = "Please configure your application for OAuth 2.0"
def oauth2decorator_from_clientsecrets(filename, scope, message=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope, message)
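# Illustrative sketch (not part of the original module): the file name, scope
# and message below are assumptions for demonstration.
def _example_decorator():
  return oauth2decorator_from_clientsecrets(
      'client_secrets.json',
      scope='https://www.googleapis.com/auth/plus',
      message='Missing or invalid client_secrets.json.')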
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' % errormsg)
else:
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id(),
namespace=OAUTH2CLIENT_NAMESPACE))
      # This code should be amended with application-specific error
# handling. The following cases should be considered:
# 1. What if the flow doesn't exist in memcache? Or is corrupt?
# 2. What if the step2_exchange fails?
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').put(credentials)
self.redirect(str(self.request.get('state')))
else:
# TODO Add error handling here.
pass
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])
def main():
run_wsgi_app(application)
|
|
import urwid
from urwid.util import decompose_tagmarkup
from gviewer.action import Actions
from gviewer.basic_widget import BasicWidget, SearchableText
from gviewer.util import stringfy, unicode_it
class Base(object): # pragma: no cover
"""Abstract class for view displayer eleemnt"""
def widget(self, message, controller=None, context=None):
raise NotImplementedError
def __unicode__(self):
raise NotImplementedError
def __str__(self):
return stringfy(self.__unicode__())
def __bytes__(self): # pragma: no cover
return self.__unicode__().encode("utf8")
class Text(Base):
"""One line detail content
Attributes:
content: str or unicode
"""
def __init__(self, content):
self.content = content
def widget(self, message, controller=None, context=None):
return SearchableText(self.content, attr_map="view-item")
def __unicode__(self):
text, _ = decompose_tagmarkup(self.content)
return unicode_it(text)
class Prop(Base):
""" Key-value property
Attributes:
key: str or unicode represent property key
value: str or unicode represent property value
"""
def __init__(self, key, value):
self.kv = (key, value)
self.max_key_length = 0
def widget(self, message, controller=None, context=None):
return SearchableText(
[("view-item key", self.kv[0].ljust(self.max_key_length + 1) + ": "),
("view-item value", self.kv[1])])
def __unicode__(self):
return u"{0}: {1}".format(self.kv[0].ljust(self.max_key_length + 1), self.kv[1])
class Group(object):
"""Group of view items
Attributes:
title: the group title
        items: iterable of Prop or Text
"""
def __init__(self, title, items, show_title=True):
self.title = title
self.items = items
self.show_title = show_title
def widgets(self, message, controller=None, context=None):
widgets = []
if self.show_title:
widgets.append(TitleWidget(self.title))
widgets += [e.widget(message, controller=controller, context=context) for e in self.items]
return widgets
def __unicode__(self): # pragma: no cover
text = u"\n".join([str(e) for e in self.items])
if self.show_title:
text = self.title + u"\n" + text
return text
def __str__(self):
return stringfy(self.__unicode__())
def __bytes__(self): # pragma: no cover
return self.__unicode__().encode("utf8")
class PropsGroup(Group):
"""Group of Prop
Attributes:
title: str or unicode
items: iterable of Prop
"""
def __init__(self, title, items, *args, **kwargs):
if items:
max_key_length = max(map(lambda p: len(p.kv[0]), items))
else:
max_key_length = 0
for p in items:
p.max_key_length = max_key_length
super(PropsGroup, self).__init__(title, items, *args, **kwargs)
class View(Base):
"""View Element
Attributes:
groups: iterable of Group
actions: dict defined {key: callback}
"""
def __init__(self, groups, actions=None):
self.groups = groups
self.actions = actions or Actions()
def widget(self, message, controller=None, context=None):
widgets = []
for group in self.groups:
widgets += group.widgets(message, controller=controller, context=context)
widgets.append(EmptyLine())
if not widgets:
widgets.append(EmptyLine())
return ContentWidget(
widgets, message, self.actions, controller=controller,
context=context)
def __unicode__(self):
return u"\n".join([str(g) + u"\n" for g in self.groups])
class TitleWidget(BasicWidget):
"""Widget for title"""
def __init__(self, content):
widget = urwid.Text(content)
super(TitleWidget, self).__init__(
widget=widget,
attr_map="view-title")
class ContentWidget(BasicWidget):
"""Widget for view items"""
def __init__(self, widgets, message, actions=None, controller=None, context=None):
walker = urwid.SimpleFocusListWalker(widgets)
widget = urwid.ListBox(walker)
super(ContentWidget, self).__init__(
controller=controller, context=context,
widget=widget)
self.prev_match = 0
self.message = message
self.actions = actions or Actions()
def search_next(self, keyword):
curr_index = self._w.get_focus()[1]
if self.prev_match != curr_index:
self.clear_prev_search()
match_index = len(self._w.body) - 1
for index in range(curr_index, len(self._w.body)):
try:
if self._w.body[index].search_next(keyword):
match_index = index
break
except AttributeError: # pragma: no cover
pass
self.prev_match = match_index
self._w.set_focus(match_index)
def search_prev(self, keyword):
curr_index = self._w.get_focus()[1]
if self.prev_match != curr_index:
self.clear_prev_search()
match_index = 0
for index in reversed(range(0, curr_index + 1)):
try:
if self._w.body[index].search_prev(keyword):
match_index = index
break
except AttributeError: # pragma: no cover
pass
self.prev_match = match_index
self._w.set_focus(match_index)
def clear_prev_search(self):
try:
self._w.body[self.prev_match].clear()
except AttributeError: # pragma: no cover
pass
def keypress(self, size, key):
if key in self.actions:
try:
self.actions[key].__call__(self.controller, self.message)
except: # pragma: no cover
self.controller.open_error()
return None
return super(ContentWidget, self).keypress(size, key)
class EmptyLine(urwid.Text):
def __init__(self):
super(EmptyLine, self).__init__("")
|
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import matplotlib.pyplot as plt
import cirq
import cirq.experiments.qubit_characterizations as ceqc
from cirq import GridQubit
from cirq import circuits, ops, sim
from cirq.experiments import (
rabi_oscillations,
single_qubit_randomized_benchmarking,
two_qubit_randomized_benchmarking,
single_qubit_state_tomography,
two_qubit_state_tomography,
)
def test_rabi_oscillations():
# Check that the excited state population matches the ideal case within a
# small statistical error.
simulator = sim.Simulator()
qubit = GridQubit(0, 0)
results = rabi_oscillations(simulator, qubit, np.pi, repetitions=1000)
data = np.asarray(results.data)
angles = data[:, 0]
actual_pops = data[:, 1]
target_pops = 0.5 - 0.5 * np.cos(angles)
rms_err = np.sqrt(np.mean((target_pops - actual_pops) ** 2))
assert rms_err < 0.1
def test_single_qubit_cliffords():
I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1, -1])
PAULIS = (I, X, Y, Z)
def is_pauli(u):
return any(cirq.equal_up_to_global_phase(u, p) for p in PAULIS)
cliffords = ceqc._single_qubit_cliffords()
assert len(cliffords.c1_in_xy) == 24
assert len(cliffords.c1_in_xz) == 24
def unitary(gates):
U = np.eye(2)
for gate in gates:
U = cirq.unitary(gate) @ U
return U
xy_unitaries = [unitary(gates) for gates in cliffords.c1_in_xy]
xz_unitaries = [unitary(gates) for gates in cliffords.c1_in_xz]
def check_distinct(unitaries):
n = len(unitaries)
for i in range(n):
for j in range(i + 1, n):
Ui, Uj = unitaries[i], unitaries[j]
assert not cirq.allclose_up_to_global_phase(Ui, Uj), f'{i}, {j}'
# Check that unitaries in each decomposition are distinct.
check_distinct(xy_unitaries)
check_distinct(xz_unitaries)
# Check that each decomposition gives the same set of unitaries.
for Uxy in xy_unitaries:
assert any(cirq.allclose_up_to_global_phase(Uxy, Uxz) for Uxz in xz_unitaries)
# Check that each unitary fixes the Pauli group.
for u in xy_unitaries:
for p in PAULIS:
assert is_pauli(u @ p @ u.conj().T), str(u)
# Check that XZ decomposition has at most one X gate per clifford.
for gates in cliffords.c1_in_xz:
num_x = len([gate for gate in gates if isinstance(gate, cirq.XPowGate)])
num_z = len([gate for gate in gates if isinstance(gate, cirq.ZPowGate)])
assert num_x + num_z == len(gates)
assert num_x <= 1
def test_single_qubit_randomized_benchmarking():
# Check that the ground state population at the end of the Clifford
# sequences is always unity.
simulator = sim.Simulator()
qubit = GridQubit(0, 0)
num_cfds = range(5, 20, 5)
results = single_qubit_randomized_benchmarking(
simulator, qubit, num_clifford_range=num_cfds, repetitions=100
)
g_pops = np.asarray(results.data)[:, 1]
assert np.isclose(np.mean(g_pops), 1.0)
def test_two_qubit_randomized_benchmarking():
# Check that the ground state population at the end of the Clifford
# sequences is always unity.
simulator = sim.Simulator()
q_0 = GridQubit(0, 0)
q_1 = GridQubit(0, 1)
num_cfds = [5, 10]
results = two_qubit_randomized_benchmarking(
simulator, q_0, q_1, num_clifford_range=num_cfds, num_circuits=10, repetitions=100
)
g_pops = np.asarray(results.data)[:, 1]
assert np.isclose(np.mean(g_pops), 1.0)
def test_single_qubit_state_tomography():
# Check that the density matrices of the output states of X/2, Y/2 and
# H + Y gates closely match the ideal cases.
simulator = sim.Simulator()
qubit = GridQubit(0, 0)
circuit_1 = circuits.Circuit(ops.X(qubit) ** 0.5)
circuit_2 = circuits.Circuit(ops.Y(qubit) ** 0.5)
circuit_3 = circuits.Circuit(ops.H(qubit), ops.Y(qubit))
act_rho_1 = single_qubit_state_tomography(simulator, qubit, circuit_1, 1000).data
act_rho_2 = single_qubit_state_tomography(simulator, qubit, circuit_2, 1000).data
act_rho_3 = single_qubit_state_tomography(simulator, qubit, circuit_3, 1000).data
tar_rho_1 = np.array([[0.5, 0.5j], [-0.5j, 0.5]])
tar_rho_2 = np.array([[0.5, 0.5], [0.5, 0.5]])
tar_rho_3 = np.array([[0.5, -0.5], [-0.5, 0.5]])
np.testing.assert_almost_equal(act_rho_1, tar_rho_1, decimal=1)
np.testing.assert_almost_equal(act_rho_2, tar_rho_2, decimal=1)
np.testing.assert_almost_equal(act_rho_3, tar_rho_3, decimal=1)
def test_two_qubit_state_tomography():
# Check that the density matrices of the four Bell states closely match
# the ideal cases. In addition, check that the output states of
# single-qubit rotations (H, H), (X/2, Y/2), (Y/2, X/2) have the correct
# density matrices.
simulator = sim.Simulator()
q_0 = GridQubit(0, 0)
q_1 = GridQubit(0, 1)
circuit_00 = circuits.Circuit(ops.H(q_0), ops.CNOT(q_0, q_1))
circuit_01 = circuits.Circuit(ops.X(q_1), ops.H(q_0), ops.CNOT(q_0, q_1))
circuit_10 = circuits.Circuit(ops.X(q_0), ops.H(q_0), ops.CNOT(q_0, q_1))
circuit_11 = circuits.Circuit(ops.X(q_0), ops.X(q_1), ops.H(q_0), ops.CNOT(q_0, q_1))
circuit_hh = circuits.Circuit(ops.H(q_0), ops.H(q_1))
circuit_xy = circuits.Circuit(ops.X(q_0) ** 0.5, ops.Y(q_1) ** 0.5)
circuit_yx = circuits.Circuit(ops.Y(q_0) ** 0.5, ops.X(q_1) ** 0.5)
act_rho_00 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_00, 1000).data
act_rho_01 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_01, 1000).data
act_rho_10 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_10, 1000).data
act_rho_11 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_11, 1000).data
act_rho_hh = two_qubit_state_tomography(simulator, q_0, q_1, circuit_hh, 1000).data
act_rho_xy = two_qubit_state_tomography(simulator, q_0, q_1, circuit_xy, 1000).data
act_rho_yx = two_qubit_state_tomography(simulator, q_0, q_1, circuit_yx, 1000).data
tar_rho_00 = np.outer([1.0, 0, 0, 1.0], [1.0, 0, 0, 1.0]) * 0.5
tar_rho_01 = np.outer([0, 1.0, 1.0, 0], [0, 1.0, 1.0, 0]) * 0.5
tar_rho_10 = np.outer([1.0, 0, 0, -1.0], [1.0, 0, 0, -1.0]) * 0.5
tar_rho_11 = np.outer([0, 1.0, -1.0, 0], [0, 1.0, -1.0, 0]) * 0.5
tar_rho_hh = np.outer([0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5])
tar_rho_xy = np.outer([0.5, 0.5, -0.5j, -0.5j], [0.5, 0.5, 0.5j, 0.5j])
tar_rho_yx = np.outer([0.5, -0.5j, 0.5, -0.5j], [0.5, 0.5j, 0.5, 0.5j])
np.testing.assert_almost_equal(act_rho_00, tar_rho_00, decimal=1)
np.testing.assert_almost_equal(act_rho_01, tar_rho_01, decimal=1)
np.testing.assert_almost_equal(act_rho_10, tar_rho_10, decimal=1)
np.testing.assert_almost_equal(act_rho_11, tar_rho_11, decimal=1)
np.testing.assert_almost_equal(act_rho_hh, tar_rho_hh, decimal=1)
np.testing.assert_almost_equal(act_rho_xy, tar_rho_xy, decimal=1)
np.testing.assert_almost_equal(act_rho_yx, tar_rho_yx, decimal=1)
@pytest.mark.usefixtures('closefigures')
def test_tomography_plot_raises_for_incorrect_number_of_axes():
simulator = sim.Simulator()
qubit = GridQubit(0, 0)
circuit = circuits.Circuit(ops.X(qubit) ** 0.5)
result = single_qubit_state_tomography(simulator, qubit, circuit, 1000)
with pytest.raises(TypeError): # ax is not a List[plt.Axes]
ax = plt.subplot()
result.plot(ax)
with pytest.raises(ValueError):
_, axes = plt.subplots(1, 3)
result.plot(axes)
|
|
import csv
import rdflib
from rdflib.namespace import RDFS, RDF, OWL
from rdflib.term import URIRef
import threading
from apimarkdown import Markdown
from apirdflib import RDFLIBLOCK
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
class sdordf2csv():
def __init__(self, queryGraph=None, fullGraph=None, markdownComments=True,excludeAttic=False):
self.setqueryGraph(queryGraph)
self.setfullGraph(fullGraph)
self.setexcludeAttic(excludeAttic)
self.setmarkdownComments(markdownComments)
def setqueryGraph(self,graph=None):
self.queryGraph = graph
def setfullGraph(self,graph=None):
self.fullGraph = graph
def setexcludeAttic(self,state):
self.excludeAttic = state
def setmarkdownComments(self,state):
self.markdown = state
def doQuery(self,graph=None,query=None):
res = None
try:
RDFLIBLOCK.acquire()
res = list(graph.query(query))
finally:
RDFLIBLOCK.release()
return res
def outputCSVtypes(self,file):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?term schema:isPartOf <http://attic.schema.org>}."
query= ('''select ?term where {
?term a ?type.
BIND(STR(?term) AS ?strVal)
FILTER NOT EXISTS {?term a rdf:Property}.
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/").
%s
}
ORDER BY ?term
''') % atticfilter
try:
RDFLIBLOCK.acquire()
types = list(self.queryGraph.query(query))
finally:
RDFLIBLOCK.release()
#log.info( "Types: %s" % len(types))
self.type2CSV(header=True,out=file)
for t in types:
self.type2CSV(term=t.term,header=False,out=file,graph=self.queryGraph)
def outputCSVproperties(self,file):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?term schema:isPartOf <http://attic.schema.org>}."
query= ('''select ?term where {
?term a rdf:Property.
FILTER EXISTS {?term rdfs:label ?l}.
BIND(STR(?term) AS ?strVal).
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/").
%s
}
ORDER BY ?term''') % atticfilter
props = list(self.queryGraph.query(query))
self.prop2CSV(header=True,out=file)
for t in props:
self.prop2CSV(term=t.term,header=False,out=file,graph=self.queryGraph)
def prop2CSV(self,term=None,header=True,out=None,graph=None):
cols = ["id","label","comment","subPropertyOf","equivalentProperty","subproperties","domainIncludes","rangeIncludes","inverseOf","supersedes","supersededBy","isPartOf"]
if not out:
return
writer = csv.writer(out,quoting=csv.QUOTE_ALL,lineterminator='\n')
if header:
writer.writerow(cols)
return
if not graph:
graph = self.queryGraph
        if term is None or graph is None:
return
row = [str(term)]
row.append(self.graphValueToCSV(subject=term,predicate=RDFS.label,graph=graph))
row.append(self.getCSVComment(term,graph=self.fullGraph))
row.append(self.getCSVSuperProperties(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=OWL.equivalentProperty,graph=graph))
row.append(self.getCSVSubProperties(term,graph=self.fullGraph))
row.append(self.getCSVDomainIncludes(term,graph=self.fullGraph))
row.append(self.getCSVRangeIncludes(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=URIRef("http://schema.org/inverseOf"),graph=graph))
row.append(self.getCSVsuperseds(term,graph=self.fullGraph))
row.append(self.getCSVSupersededBy(term,graph=self.fullGraph))
row=[s.encode('utf-8') for s in row]
writer.writerow(row)
#print term
def type2CSV(self,term=None,header=True,out=None,graph=None):
cols = ["id","label","comment","subTypeOf","enumerationtype","equivalentClass","properties","subTypes","supersedes","supersededBy","isPartOf"]
if not out:
return
writer = csv.writer(out,quoting=csv.QUOTE_ALL,lineterminator='\n')
if header:
writer.writerow(cols)
return
if not graph:
graph = self.queryGraph
        if term is None or graph is None:
return
if not isinstance(term, URIRef):
term = URIRef(term)
enumType = self.graphValueToCSV(subject=term,predicate=RDF.type,graph=graph)
if enumType.endswith("#Class"):
enumType = ""
row = [str(term)]
row.append(self.graphValueToCSV(subject=term,predicate=RDFS.label,graph=graph))
row.append(self.getCSVComment(term,graph=self.fullGraph))
row.append(self.getCSVSupertypes(term,graph=self.fullGraph))
row.append(enumType)
row.append(self.graphValueToCSV(subject=term,predicate=OWL.equivalentClass,graph=graph))
row.append(self.getCSVTypeProperties(term,graph=self.fullGraph))
row.append(self.getCSVSubtypes(term,graph=self.fullGraph))
row.append(self.getCSVsuperseds(term,graph=self.fullGraph))
row.append(self.getCSVSupersededBy(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=URIRef("http://schema.org/isPartOf"),graph=graph))
row=[s.encode('utf-8') for s in row]
writer.writerow(row)
def graphValueToCSV(self, subject=None, predicate= None, object= None, graph=None):
ret = ""
try:
RDFLIBLOCK.acquire()
ret = str(graph.value(subject=subject,predicate=predicate,object=object))
finally:
RDFLIBLOCK.release()
        if ret is None or ret == "None":
ret = ""
return ret
def getCSVSupertypes(self,term=None,graph=None):
query='''select ?sup where{
<%s> rdfs:subClassOf ?sup.
BIND(STR(?sup) AS ?strVal)
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/")
}
ORDER BY ?sup''' % term
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
return ret
def getCSVTypeProperties(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?prop schema:isPartOf <http://attic.schema.org>.}"
query='''select DISTINCT ?prop where{
?term (^rdfs:subClassOf*) <%s>.
?prop <http://schema.org/domainIncludes> ?term.
%s
}
ORDER BY ?prop''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.prop for x in res])
return ret
def getCSVSubtypes(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sub schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sub where{
?sub rdfs:subClassOf <%s>.
%s
}
ORDER BY ?sub''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sub for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
return ret
def getCSVSupersededBy(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sub schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sup where{
<%s> schema:supersededBy ?sup.
%s
}
ORDER BY ?sup''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
#print "%s supercededBy: '%s'" % (term,ret)
return ret
def getCSVsuperseds(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sup schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sup where{
?sup schema:supersededBy <%s>.
%s
}
ORDER BY ?sup''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
#print "%s superseds: '%s'" % (term,ret)
return ret
def getCSVSuperProperties(self,term=None,graph=None):
query='''select ?sup where{
<%s> rdfs:subPropertyOf ?sup.
BIND(STR(?sup) AS ?strVal)
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/")
}
ORDER BY ?sup''' % term
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
#print "%s subtypeof: '%s'" % (term,ret)
return ret
def getCSVSubProperties(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sub schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sub where{
?sub rdfs:subPropertyOf <%s>.
%s
}
ORDER BY ?sub''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sub for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
return ret
def getCSVDomainIncludes(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?type schema:isPartOf <http://attic.schema.org>.}"
query='''select ?type where{
<%s> <http://schema.org/domainIncludes> ?type.
%s
}
ORDER BY ?type''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.type for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
return ret
def getCSVRangeIncludes(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?type schema:isPartOf <http://attic.schema.org>.}"
query='''select ?type where{
<%s> <http://schema.org/rangeIncludes> ?type.
%s
}
ORDER BY ?type''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.type for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
return ret
def getCSVComment(self,term=None,graph=None):
query='''select ?com where{
<%s> rdfs:comment ?com.
}''' % term
res = self.doQuery(graph,query)
ret = ', '.join([x.com for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
if self.markdown:
Markdown.setPre("http://schema.org/")
ret = Markdown.parse(ret)
Markdown.setPre()
return ret
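# Illustrative sketch (not part of the original module): dumping the schema.org
# types and properties of an already-loaded rdflib graph to CSV files. The
# graph argument and output file names are assumptions.
def _example_dump(graph):
    converter = sdordf2csv(queryGraph=graph, fullGraph=graph,
                           markdownComments=True, excludeAttic=True)
    with open("types.csv", "w") as types_file:
        converter.outputCSVtypes(types_file)
    with open("properties.csv", "w") as props_file:
        converter.outputCSVproperties(props_file)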
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test feature extraction"""
import math
import tempfile
import tvm
from tvm import te, auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
def fequal(a, b):
return math.fabs(a - b) < 1e-6
def test_cpu_matmul():
dag = auto_scheduler.ComputeDAG(matmul_auto_scheduler_test(512, 512, 512))
s = dag.get_init_state()
C = s.stage_ops[2]
i, j, k = s[C].iters
io, ii = s.split(C, i, [16])
jo, ji = s.split(C, j, [8])
s.reorder(C, [io, jo, k, ji, ii])
s.vectorize(C, ji)
s.parallel(C, io)
s.parallel(C, jo)
s.unroll(C, k)
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
names = auto_scheduler.feature.get_per_store_feature_names()
fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
stage_0 = fea[0]
assert len(stage_0) == len(names), "%d vs %d" % (len(stage_0), len(names))
fea_dict = {}
for name, value in zip(names, stage_0):
fea_dict[name] = value
for name in ["B0", "B1", "B2"]:
if fequal(fea_dict[name + ".acc_type.kReadWrite"], 1.0):
c_name = name
if fequal(fea_dict[name + ".acc_type.kRead"], 1.0):
if fequal(fea_dict[name + ".stride"], 0.0):
b_name = name
else:
a_name = name
"""
lowered IR:
Placeholder: A, B
parallel i.0 (0,32)
parallel j.0 (0,64)
unroll k (0,512)
vectorize j.1 (0,8)
for i.1 (0,16)
            C[...] = A[...] * B[...]
"""
# check touched memory in bytes, touched unique memory in bytes, reuse distance, etc.
assert fequal(fea_dict[c_name + ".bytes"], math.log2(512 ** 3 * 4 + 1))
assert fequal(fea_dict[b_name + ".unique_bytes"], math.log2(512 ** 2 * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_iter"], math.log2(8 * 16 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_bytes"], math.log2((8 * 16 + 8 + 16) * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_ct"], math.log2(512 + 1))
# check annotations
assert fequal(fea_dict["unroll_num"], math.log2(1 + 1))
# assert fequal(fea_dict["unroll_type.kPosInnerReduce"], 1.0)
assert fequal(fea_dict["vec_num"], math.log2(1 + 1))
assert fequal(fea_dict["parallel_num"], math.log2(2 + 1))
assert fequal(fea_dict["parallel_prod"], math.log2((512 * 512 / 16 / 8) + 1))
def test_cpu_fusion():
def fusion_test(N, M):
A = te.placeholder((N, M), name="A")
B = te.compute((N, M), lambda i, j: A[i][j], name="B")
C = te.compute((N, M), lambda i, j: B[i][j], name="C")
return [A, B, C]
dag = auto_scheduler.ComputeDAG(fusion_test(64, 32))
s = dag.get_init_state()
s.compute_at(1, 2, s.stages[2].iters[1])
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
names = auto_scheduler.feature.get_per_store_feature_names()
fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
"""
lowered IR:
Placeholder: A
for i (0,64)
for j (0,32)
for ii (1)
for jj (1)
B[...] = A[...]
C[...] = B[...]
"""
# check reuse distance and reuse type after fusion
found = False
for stage_fea in fea:
for i, (name, value) in enumerate(zip(names, stage_fea)):
if "reuse_type.kSerialMultipleReadWrite" in name and value > 0.5:
# reuse distance in #iter
assert fequal(stage_fea[i + 2], 1.0)
# reuse distance in bytes
assert fequal(stage_fea[i + 3], math.log2(16 + 1))
found = True
assert found
def test_gpu_feature():
# Use records to build a complicated GPU program
json_records = "\n".join(
(
"""{"i": [["[\\"matmul_auto_scheduler_test\\", 512, 512, 512]", "cuda"], [[], [["CHW", 2, "local"], ["SP", 2, 0, 512, [1, 16, 32, 1], 1], ["SP", 2, 5, 512, [4, 1, 1, 16], 1], ["SP", 2, 10, 512, [1, 2], 1], ["RE", 2, [0, 5, 1, 6, 2, 7, 10, 11, 3, 8, 12, 4, 9]], ["FSP", 3, 0, 1, 3], ["FSP", 3, 4, 2, 3], ["RE", 3, [0, 4, 1, 5, 2, 6, 3, 7]], ["FU", 2, [0, 1]], ["FU", 3, [0, 1]], ["FU", 2, [1, 2]], ["FU", 3, [1, 2]], ["FU", 2, [2, 3]], ["FU", 3, [2, 3]], ["CA", 2, 3, 2], ["CHR", 1, "shared", [2]], ["CA", 2, 3, 3], ["FU", 2, [0, 1]], ["FFSP", 2, 0, [1, 2], 1, 1], ["AN", 2, 1, 6], ["CHR", 0, "shared", [3]], ["CA", 1, 4, 3], ["FU", 1, [0, 1]], ["FFSP", 1, 0, [1, 2], 1, 1], ["AN", 1, 1, 6], ["AN", 5, 0, 5], ["AN", 5, 1, 4], ["AN", 5, 2, 6], ["PR", 4, 0, "auto_unroll_max_step$1024"]]]], "r": [[0.00536798], 0, 2.49277, 1585564852], "v": "v0.1"}""",
)
)
# load states
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(json_records)
f.flush()
inputs, _ = auto_scheduler.RecordReader(f.name).read_lines()
inp = inputs[0]
task = auto_scheduler.SearchTask(
workload_key=inp.task.workload_key,
target=inp.task.target,
hardware_params=auto_scheduler.HardwareParams(
100000, 16, 64, 1 << 30, 1 << 30, 1 << 30, 1 << 30, 1 << 30
),
)
state = task.compute_dag.infer_bound_from_state(inputs[0].state)
fea = auto_scheduler.feature.get_per_store_features_from_states([state], task)[0]
names = auto_scheduler.feature.get_per_store_feature_names()
# build feature dict
fea_dicts = []
for i in range(len(fea)):
tmp_dict = {}
for j in range(len(names)):
tmp_dict[names[j]] = fea[i][j]
fea_dicts.append(tmp_dict)
"""
lowered IR:
Placeholder: A, B
blockIdx.x i.0@j.0@ (0,8)
vthread i.1@j.1@ (0,4)
threadIdx.x i.2@j.2@ (0,16)
C.local auto_unroll: 1024
for k.0 (0,256)
for ax0@ax1@.0 (0,8)
threadIdx.x ax0@ax1@.1 (0,16)
B.shared = ...
for ax0@ax1@.0 (0,64)
threadIdx.x ax0@ax1@.1 (0,16)
A.shared = ...
for i_c.3 (0,32)
for k.2 (0,2)
for j_c.4 (0,16)
C.local = ...
for i.3 (0,32)
for j.3 (0,16)
C = ...
"""
# check gpu-related features
assert fequal(fea_dicts[0]["blockIdx_x_len"], math.log2(8 + 1))
assert fequal(fea_dicts[0]["vthread_len"], math.log2(4 + 1))
assert fequal(fea_dicts[1]["threadIdx_x_len"], math.log2(16 + 1))
assert fequal(fea_dicts[0]["threadIdx_y_len"], math.log2(1 + 1))
assert fequal(fea_dicts[2]["blockIdx_z_len"], math.log2(1 + 1))
assert fequal(fea_dicts[0]["is_gpu"], 1.0)
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_fusion()
test_gpu_feature()
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scenario factory."""
import concurrent
import random
from typing import Any, Callable, Collection, Iterable, List, Mapping, Sequence, Tuple, TypeVar
import chex
import dm_env
import immutabledict
from ml_collections import config_dict
import numpy as np
import rx
from rx import subject
from meltingpot.python import bot as bot_factory
from meltingpot.python import substrate as substrate_factory
from meltingpot.python.configs import scenarios as scenario_config
from meltingpot.python.utils.scenarios.wrappers import agent_slot_wrapper
from meltingpot.python.utils.scenarios.wrappers import all_observations_wrapper
from meltingpot.python.utils.scenarios.wrappers import base
from meltingpot.python.utils.scenarios.wrappers import default_observation_wrapper
AVAILABLE_SCENARIOS = frozenset(scenario_config.SCENARIOS)
SCENARIOS_BY_SUBSTRATE: Mapping[
str, Collection[str]] = scenario_config.scenarios_by_substrate(
scenario_config.SCENARIOS)
PERMITTED_OBSERVATIONS = frozenset({
'INVENTORY',
'READY_TO_SHOOT',
'RGB',
})
T = TypeVar('T')
def _step_fn(policy: bot_factory.Policy) -> Callable[[dm_env.TimeStep], int]:
"""Returns a stateful step function where the state is encapsulated.
Args:
policy: the underlying policy to use.
Returns:
A step function that returns an action in response to a timestep.
"""
state = policy.initial_state()
def step(timestep: dm_env.TimeStep) -> int:
nonlocal state
action, state = policy.step(timestep=timestep, prev_state=state)
return action
return step
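# Illustrative sketch (not part of the original module): a minimal policy
# satisfying the contract _step_fn relies on -- initial_state() plus
# step(timestep, prev_state) -> (action, next_state). The constant action is an
# assumption for demonstration.
class _ConstantPolicy:
  """Ignores observations and always selects action 0."""

  def initial_state(self):
    return ()

  def step(self, timestep, prev_state):
    del timestep  # unused
    return 0, prev_state

  def close(self):
    pass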
class Population:
"""A population of policies to use in a scenario."""
def __init__(self, policies: Mapping[str, bot_factory.Policy],
population_size: int) -> None:
"""Initializes the population.
Args:
policies: the policies to sample from (with replacement) each episode.
population_size: the number of policies to sample on each reset.
"""
self._policies = dict(policies)
self._population_size = population_size
self._executor = concurrent.futures.ThreadPoolExecutor(
max_workers=self._population_size)
self._step_fns: List[Callable[[dm_env.TimeStep], int]] = []
self._action_futures: List[concurrent.futures.Future] = []
def close(self):
"""Closes the population."""
for future in self._action_futures:
future.cancel()
self._executor.shutdown(wait=False)
for policy in self._policies.values():
policy.close()
def _sample_names(self) -> Sequence[str]:
"""Returns a sample of policy names for the population."""
return random.choices(tuple(self._policies), k=self._population_size)
def reset(self) -> None:
"""Resamples the population."""
names = self._sample_names()
self._step_fns = [_step_fn(self._policies[name]) for name in names]
for future in self._action_futures:
future.cancel()
self._action_futures.clear()
def send_timestep(self, timestep: dm_env.TimeStep) -> None:
"""Sends timestep to population for asynchronous processing.
Args:
      timestep: The substrate timestep for the population.
Raises:
RuntimeError: previous action has not been awaited.
"""
if self._action_futures:
raise RuntimeError('Previous action not retrieved.')
for n, step_fn in enumerate(self._step_fns):
bot_timestep = timestep._replace(
observation=timestep.observation[n], reward=timestep.reward[n])
future = self._executor.submit(step_fn, bot_timestep)
self._action_futures.append(future)
def await_action(self) -> Sequence[int]:
"""Waits for the population action in response to last timestep.
Returns:
The action for the population.
Raises:
RuntimeError: no timestep has been sent.
"""
if not self._action_futures:
raise RuntimeError('No timestep sent.')
actions = tuple(future.result() for future in self._action_futures)
self._action_futures.clear()
return actions
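# Illustrative sketch (not part of the original module): one step of the strict
# protocol Population enforces -- send_timestep and await_action must alternate.
def _example_population_step(population, timestep):
  """Fans a timestep out to the sampled policies and gathers their actions."""
  population.send_timestep(timestep)
  return population.await_action()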
def _restrict_observation(
observation: Mapping[str, T],
permitted_observations: Collection[str],
) -> Mapping[str, T]:
"""Restricts an observation to only the permitted keys."""
return immutabledict.immutabledict({
key: observation[key]
for key in observation if key in permitted_observations
})
def _restrict_observations(
observations: Iterable[Mapping[str, T]],
permitted_observations: Collection[str],
) -> Sequence[Mapping[str, T]]:
"""Restricts multiple observations to only the permitted keys."""
return tuple(
_restrict_observation(observation, permitted_observations)
for observation in observations
)
def _partition(
values: Sequence[T],
is_focal: Sequence[bool],
) -> Tuple[Sequence[T], Sequence[T]]:
"""Partitions a sequence into focal and background sequences."""
focal_values = []
background_values = []
for focal, value in zip(is_focal, values):
if focal:
focal_values.append(value)
else:
background_values.append(value)
return tuple(focal_values), tuple(background_values)
def _merge(
focal_values: Sequence[T],
background_values: Sequence[T],
is_focal: Sequence[bool],
) -> Sequence[T]:
"""Merges focal and background sequences into one."""
focal_values = iter(focal_values)
background_values = iter(background_values)
return tuple(
next(focal_values if focal else background_values) for focal in is_focal
)
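# Illustrative sketch (not part of the original module): _merge inverts
# _partition for a fixed is_focal mask. For example:
#
#   is_focal = (True, False, True)
#   focal, background = _partition(('a', 'b', 'c'), is_focal)
#   # focal == ('a', 'c'), background == ('b',)
#   assert _merge(focal, background, is_focal) == ('a', 'b', 'c')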
@chex.dataclass(frozen=True)
class PopulationObservables:
"""Observables for a population.
Attributes:
    action: emits actions sent to the substrate by the population.
timestep: emits timesteps sent from the substrate to the population.
"""
action: rx.typing.Observable[Sequence[int]]
timestep: rx.typing.Observable[dm_env.TimeStep]
@chex.dataclass(frozen=True)
class ScenarioObservables(substrate_factory.SubstrateObservables):
"""Observables for a Scenario.
Attributes:
action: emits actions sent to the scenario from (focal) players.
timestep: emits timesteps sent from the scenario to (focal) players.
events: emits environment-specific events resulting from any interactions
with the scenario.
focal: observables from the perspective of the focal players.
background: observables from the perspective of the background players.
substrate: observables for the underlying substrate.
"""
focal: PopulationObservables
background: PopulationObservables
substrate: substrate_factory.SubstrateObservables
class Scenario(base.Wrapper):
"""An substrate where a number of player slots are filled by bots."""
def __init__(
self,
substrate,
bots: Mapping[str, bot_factory.Policy],
is_focal: Sequence[bool],
permitted_observations: Collection[str] = PERMITTED_OBSERVATIONS,
) -> None:
"""Initializes the scenario.
Args:
substrate: the substrate to add bots to.
bots: the bots to sample from (with replacement) each episode.
is_focal: which player slots are allocated to focal players.
permitted_observations: the observations exposed by the scenario to focal
agents.
"""
num_players = len(substrate.action_spec())
if len(is_focal) != num_players:
raise ValueError(f'is_focal is length {len(is_focal)} but substrate is '
f'{num_players}-player.')
super().__init__(substrate)
self._is_focal = is_focal
self._background_population = Population(
policies=bots, population_size=num_players - sum(is_focal))
self._permitted_observations = frozenset(permitted_observations)
self._focal_action_subject = subject.Subject()
self._focal_timestep_subject = subject.Subject()
self._background_action_subject = subject.Subject()
self._background_timestep_subject = subject.Subject()
self._events_subject = subject.Subject()
focal_observables = PopulationObservables(
action=self._focal_action_subject,
timestep=self._focal_timestep_subject,
)
background_observables = PopulationObservables(
action=self._background_action_subject,
timestep=self._background_timestep_subject,
)
self._observables = ScenarioObservables(
action=self._focal_action_subject,
events=self._events_subject,
timestep=self._focal_timestep_subject,
focal=focal_observables,
background=background_observables,
substrate=super().observables(),
)
def close(self) -> None:
"""See base class."""
self._background_population.close()
super().close()
self._focal_action_subject.on_completed()
self._background_action_subject.on_completed()
self._focal_timestep_subject.on_completed()
self._background_timestep_subject.on_completed()
self._events_subject.on_completed()
def _await_full_action(self, focal_action: Sequence[int]) -> Sequence[int]:
"""Returns full action after awaiting bot actions."""
self._focal_action_subject.on_next(focal_action)
background_action = self._background_population.await_action()
self._background_action_subject.on_next(background_action)
return _merge(focal_action, background_action, self._is_focal)
def _split_timestep(
self, timestep: dm_env.TimeStep
) -> Tuple[dm_env.TimeStep, dm_env.TimeStep]:
"""Splits multiplayer timestep as needed by agents and bots."""
focal_rewards, background_rewards = _partition(timestep.reward,
self._is_focal)
focal_observations, background_observations = _partition(
timestep.observation, self._is_focal)
focal_observations = _restrict_observations(focal_observations,
self._permitted_observations)
focal_timestep = timestep._replace(
reward=focal_rewards, observation=focal_observations)
background_timestep = timestep._replace(
reward=background_rewards, observation=background_observations)
return focal_timestep, background_timestep
def _send_full_timestep(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
"""Returns focal timestep and sends background timestep to bots."""
focal_timestep, background_timestep = self._split_timestep(timestep)
self._background_timestep_subject.on_next(background_timestep)
self._background_population.send_timestep(background_timestep)
self._focal_timestep_subject.on_next(focal_timestep)
return focal_timestep
def reset(self) -> dm_env.TimeStep:
"""See base class."""
self._background_population.reset()
timestep = super().reset()
focal_timestep = self._send_full_timestep(timestep)
for event in self.events():
self._events_subject.on_next(event)
return focal_timestep
def step(self, action: Sequence[int]) -> dm_env.TimeStep:
"""See base class."""
action = self._await_full_action(focal_action=action)
timestep = super().step(action)
focal_timestep = self._send_full_timestep(timestep)
for event in self.events():
self._events_subject.on_next(event)
return focal_timestep
def events(self) -> Sequence[Tuple[str, Any]]:
"""See base class."""
# Do not emit substrate events as these may not make sense in the context
# of a scenario (e.g. player indices may have changed).
return ()
def action_spec(self) -> Sequence[dm_env.specs.DiscreteArray]:
"""See base class."""
focal_action_spec, _ = _partition(super().action_spec(), self._is_focal)
return focal_action_spec
def observation_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
"""See base class."""
focal_observation_spec, _ = _partition(super().observation_spec(),
self._is_focal)
return _restrict_observations(focal_observation_spec,
self._permitted_observations)
def reward_spec(self) -> Sequence[dm_env.specs.Array]:
"""See base class."""
# TODO(b/192925212): better typing to avoid pytype disables.
reward_spec: Sequence[dm_env.specs.Array] = super().reward_spec() # pytype: disable=annotation-type-mismatch
focal_reward_spec, _ = _partition(reward_spec, self._is_focal)
return focal_reward_spec
def observables(self) -> ScenarioObservables:
"""Returns the observables for the scenario."""
return self._observables
def get_config(scenario_name: str) -> config_dict.ConfigDict:
"""Returns a config for the specified scenario.
Args:
scenario_name: Name of the scenario. Must be in AVAILABLE_SCENARIOS.
"""
if scenario_name not in AVAILABLE_SCENARIOS:
raise ValueError(f'Unknown scenario {scenario_name!r}')
scenario = scenario_config.SCENARIOS[scenario_name]
substrate = substrate_factory.get_config(scenario.substrate)
bots = {name: bot_factory.get_config(name) for name in scenario.bots}
config = config_dict.create(
substrate=substrate,
bots=bots,
is_focal=scenario.is_focal,
num_players=sum(scenario.is_focal),
num_bots=len(scenario.is_focal) - sum(scenario.is_focal),
)
return config.lock()
def build(config: config_dict.ConfigDict) -> Scenario:
"""Builds a scenario for the given config.
Args:
config: config resulting from `get_config`.
Returns:
The test scenario.
"""
substrate = substrate_factory.build(config.substrate)
bots = {
bot_name: bot_factory.build(bot_config)
for bot_name, bot_config in config.bots.items()
}
# Add observations needed by some bots. These are removed for focal players.
substrate_observations = set(substrate.observation_spec()[0])
substrate = all_observations_wrapper.Wrapper(
substrate, observations_to_share=['POSITION'], share_actions=True)
substrate = agent_slot_wrapper.Wrapper(substrate)
if 'INVENTORY' not in substrate_observations:
substrate = default_observation_wrapper.Wrapper(
substrate, key='INVENTORY', default_value=np.zeros([1]))
return Scenario(
substrate=substrate,
bots=bots,
is_focal=config.is_focal,
permitted_observations=PERMITTED_OBSERVATIONS & substrate_observations)
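# Illustrative sketch (hypothetical scenario name): typical factory usage is
# to build the config first and then the scenario itself.
#
#   config = get_config('some_scenario_name')  # must be in AVAILABLE_SCENARIOS
#   scenario = build(config)
#   timestep = scenario.reset()
#   timestep = scenario.step([0] * len(scenario.action_spec()))
#   scenario.close()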
|
|
#!/usr/bin/env python
"""
amnonscript
amnonutils.py
heatsequer
various utility functions
"""
import numpy as np
import scipy as sp
import inspect
import os
import logging
import time
__version__ = "0.2"
def start_log(level=logging.DEBUG, filename='log.hs.log'):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the file to save the log to, or None to print to screen (default is 'log.hs.log')
"""
logging.basicConfig(filename=filename, level=level,format='%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('logging started')
def Debug(dlevel, *args):
    """print and log the message args if dlevel is at least the module-wide DebugLevel"""
    if dlevel >= DebugLevel:
        logger = logging.getLogger(__name__)
        logger.debug(args)
        print(args)
def SetDebugLevel(dlevel):
"""
set the debug level for output
0 - all info (debug)
5 - warnings / info
9 - critical
input:
dlevel : int (0-10)
the minimum message level to show
"""
global DebugLevel
DebugLevel = dlevel
def reverse(seq):
oseq=''
for a in seq:
oseq=a+oseq
return oseq
def complement(seq):
seq=seq.upper()
oseq=''
for a in seq:
if a=='A':
oseq+='T'
elif a=='C':
oseq+='G'
elif a=='G':
oseq+='C'
elif a=='T':
oseq+='A'
else:
oseq+='N'
return oseq
def revcomp(seq):
return reverse(complement(seq))
def iterfastaseqs(filename):
"""
iterate a fasta file and return header,sequence
input:
filename - the fasta file name
output:
seq - the sequence
header - the header
"""
fl=open(filename,"rU")
cseq=''
chead=''
for cline in fl:
if cline[0]=='>':
if chead:
yield(cseq,chead)
cseq=''
chead=cline[1:].rstrip()
else:
cseq+=cline.strip()
if cseq:
yield(cseq,chead)
fl.close()
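def _example_iterfastaseqs():
    """Illustrative sketch (hypothetical file name): note the generator
    yields (sequence, header) tuples, in that order."""
    for cseq, chead in iterfastaseqs('seqs.fa'):
        print('%s: %d bases' % (chead, len(cseq)))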
def readfastaseqs(filename):
"""
read a fasta file and return a list of sequences
input:
filename - the fasta file name
output:
seqs - a list of sequences
headers - a list of the headers
"""
fl=open(filename,"rU")
cseq=''
seqs=[]
headers=[]
for cline in fl:
if cline[0]=='>':
headers.append(cline[1:].rstrip())
if cseq:
seqs.append(cseq)
cseq=''
else:
cseq+=cline.strip()
if cseq:
    seqs.append(cseq)
fl.close()
return seqs,headers
def isort(clist,reverse=False):
"""
matlab style sort
returns both sorted list and the indices of the sort
input:
clist: a list to sort
reverse - true to reverse the sort direction
output:
(svals,sidx)
svals - the sorted values
sidx - the sorted indices
"""
res=sorted(enumerate(clist), key=lambda x:x[1],reverse=reverse)
svals=[i[1] for i in res]
sidx=[i[0] for i in res]
return svals,sidx
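def _example_isort():
    """Illustrative sketch: sidx holds the original positions of the sorted
    values, so it can be used with reorder() on parallel lists."""
    svals, sidx = isort([3, 1, 2])
    assert svals == [1, 2, 3]
    assert sidx == [1, 2, 0]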
def tofloat(clist):
"""
convert a list of strings to a list of floats
input:
clist - list of strings
output:
res - list of floats
"""
res=[]
for s in clist:
try:
cval=float(s)
except:
cval=0
if np.isnan(cval):
cval=0
res.append(cval)
return res
def reorder(clist,idx):
""""
reorder a list according to idx
"""
return [clist[i] for i in idx]
def delete(clist,idx):
"""
delete elements from list
"""
for i in sorted(idx, reverse=True):
del clist[i]
return clist
def clipstrings(clist,maxlen,reverse=False):
"""
clip all strings in a list to maxlen
input:
clist - list of strings
maxlen - maximal length for each string
reverse - if true - clip from end (otherwise from beginning)
"""
retlist=[]
for cstr in clist:
clen=min(maxlen,len(cstr))
if reverse:
retlist.append(cstr[-clen:])
else:
retlist.append(cstr[0:clen])
return retlist
def mlhash(cstr,emod=0):
"""
do a hash function on the string cstr
based on the matlab hash function string2hash
input:
cstr - the string to hash
emod - if 0, don't do modulu, otherwise do modulo
"""
chash = 5381
pnum=pow(2,32)-1
for cc in cstr:
chash=np.mod(chash*33+ord(cc),pnum)
if emod>0:
chash=np.mod(chash,emod)
return(chash)
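def _example_mlhash():
    """Illustrative sketch: the hash is deterministic, and emod bounds the
    result."""
    assert mlhash('a') == 5381 * 33 + ord('a')  # 177670
    assert mlhash('a', emod=10) == (5381 * 33 + ord('a')) % 10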
def nicenum(num):
"""
get a nice string representation of the number
(turn to K/M if big, m/u if small, trim numbers after decimal point)
input:
num - the number
output:
numstr - the nice string of the number
"""
if num==0:
numstr="0"
elif abs(num)>1000000:
numstr="%.1fM" % (float(num)/1000000)
elif abs(num)>1000:
numstr="%.1fK" % (float(num)/1000)
elif abs(num)<0.000001:
numstr="%.1fu" % (num*1000000)
elif abs(num)<0.001:
numstr="%.1fm" % (num*1000)
else:
numstr=str(int(num))
return numstr
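def _example_nicenum():
    """Illustrative sketch (assumes the integer branch above returns a
    string): expected renderings for a few magnitudes."""
    assert nicenum(2500000) == '2.5M'
    assert nicenum(1500) == '1.5K'
    assert nicenum(0.0005) == '0.5m'
    assert nicenum(42) == '42'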
def SeqToArray(seq):
    """ convert a string sequence to a numpy array"""
    # map each base (case insensitive) to its numeric code;
    # gap ('-') is 4, anything unrecognized is 5
    codes = {'A': 0, 'C': 1, 'G': 2, 'T': 3, '-': 4}
    seqa = np.zeros(len(seq), dtype=np.int8)
    for ind, base in enumerate(seq):
        seqa[ind] = codes.get(base.upper(), 5)
    return(seqa)
def ArrayToSeq(seqa):
""" convert a numpy array to sequence (upper case)"""
seq=''
for cnuc in seqa:
if cnuc==0:
seq+='A'
elif cnuc==1:
seq+='C'
elif cnuc==2:
seq+='G'
elif cnuc==3:
seq+='T'
else:
seq+='N'
return(seq)
def fdr2(p):
"""Benjamini-Hochberg p-value correction for multiple hypothesis testing."""
p = np.asarray(p, dtype=float)
by_descend = p.argsort()[::-1]
by_orig = by_descend.argsort()
steps = float(len(p)) / np.arange(len(p), 0, -1)
q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))
return q[by_orig]
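def _example_fdr2():
    """Illustrative sketch: BH q-values for three p-values; q = p * n / rank,
    made monotone from the largest p down."""
    q = fdr2([0.01, 0.02, 0.9])
    assert np.allclose(q, [0.03, 0.03, 0.9])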
def fdr(pvalues, correction_type = "Benjamini-Hochberg"):
"""
consistent with R - print correct_pvalues_for_multiple_testing([0.0, 0.01, 0.029, 0.03, 0.031, 0.05, 0.069, 0.07, 0.071, 0.09, 0.1])
"""
pvalues = np.array(pvalues)
n = float(pvalues.shape[0])
new_pvalues = np.empty(n)
if correction_type == "Bonferroni":
new_pvalues = n * pvalues
elif correction_type == "Bonferroni-Holm":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
for rank, vals in enumerate(values):
pvalue, i = vals
new_pvalues[i] = (n-rank) * pvalue
elif correction_type == "Benjamini-Hochberg":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
values.reverse()
new_values = []
for i, vals in enumerate(values):
rank = n - i
pvalue, index = vals
new_values.append((n/rank) * pvalue)
for i in range(0, int(n)-1):
if new_values[i] < new_values[i+1]:
new_values[i+1] = new_values[i]
for i, vals in enumerate(values):
pvalue, index = vals
new_pvalues[index] = new_values[i]
return new_pvalues
def common_start(sa,sb):
"""
returns the longest common substring from the beginning of sa and sb
from http://stackoverflow.com/questions/18715688/find-common-substring-between-two-strings
"""
def _iter():
for a, b in zip(sa, sb):
if a == b:
yield a
else:
return
return ''.join(_iter())
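def _example_common_start():
    """Illustrative sketch: returns the longest shared prefix."""
    assert common_start('ACGTAA', 'ACGCCC') == 'ACG'
    assert common_start('abc', 'xyz') == ''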
DebugLevel=5
def listdel(dat,todel):
"""
delete elements with indices from list todel in the list dat
input:
dat - the list to remove elements from
todel - indices of the items to remove
output:
dat - the new deleted list
"""
for cind in sorted(todel, reverse=True):
del dat[cind]
return dat
def listtodict(dat):
"""
convert a list into a dict with items as keys, and lists of their positions as values
input:
dat - the list
output:
thedict
"""
thedict={}
for idx,cdat in enumerate(dat):
if cdat in thedict:
thedict[cdat].append(idx)
else:
thedict[cdat]=[idx]
return thedict
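def _example_listtodict():
    """Illustrative sketch: duplicate items map to all of their positions."""
    assert listtodict(['a', 'b', 'a']) == {'a': [0, 2], 'b': [1]}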
def savelisttofile(dat,filename,delimiter='\t'):
"""
save a list to a (tab delimited) file
input:
dat - the list to save
filename - the filename to save to
delimiter - the delimiter to use
"""
with open(filename,'w') as fl:
fl.write(delimiter.join(dat))
def dictupper(dat):
"""
turn dict keys to upper case
input:
dat - a dict with string keys
output:
newdat - a dict with the upper case keys
"""
newdat = {k.upper(): v for k, v in dat.items()}
return newdat
def listupper(dat):
"""
turn a list of strings to upper case
input:
dat : list of strings
output:
newdat : list of strings
- in uppercase
"""
newdat = [cstr.upper() for cstr in dat]
return newdat
def get_current_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_current_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
Taken from scikit-bio (Thanks!)
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and the second element is its
# filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
def findn(text,substr,num):
"""
find the num-th occurrence of substr in text
input:
text : string
the string to search in
substr : string
the substring to search for in text
num : int
the occurrence number (1 is the first occurrence, etc.)
output:
index : int
the position of the start of the num-th substring in text, or -1 if not present
"""
index=0
while index < len(text):
index = text.find(substr, index)
if index == -1:
break
num-=1
if num==0:
return index
index+=len(substr)
return -1
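def _example_findn():
    """Illustrative sketch: positions are 0-based and -1 means not found."""
    assert findn('abcabcabc', 'abc', 2) == 3
    assert findn('abcabcabc', 'abc', 4) == -1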
def getnicetax(name,separator=';'):
"""
get the last non empty string (separated by separator)
used to get a nice taxonomy name from a taxonomy string
input:
name : str
the taxonomy string
separator: str
the separator between taxonomic levels (i.e. ';')
output:
nicename : str
only the last non empty part of name
"""
nicename='unknown'
s=name.split(separator)
for cstr in s:
if len(cstr)>0:
nicename=cstr
return nicename
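def _example_getnicetax():
    """Illustrative sketch: trailing empty taxonomic levels are skipped."""
    assert getnicetax('Bacteria;Firmicutes;;') == 'Firmicutes'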
def sum(data,axis=None):
"""
Get the sum of the numpy array/scipy sparse matrix
input:
data : numpy array or scipy matrix
the data to sum
axis : int or None
equivalent to axis parameter on numpy sum
output:
csum : a 1d numpy array
the sum
"""
# if sparse matrix, the output is a matrix
# so we need to convert to 1d array
if sp.sparse.isspmatrix(data):
    csum=data.sum(axis=axis)
    # with axis=None scipy returns a scalar, otherwise a matrix
    if axis is not None:
        csum=csum.A.flatten()
else:
    csum=np.sum(data,axis=axis)
return csum
def mean(data,axis=None):
"""
Get the mean of the numpy array/scipy sparse matrix
input:
data : numpy array or scipy matrix
the data to sum
axis : int or None
equivalent to axis parameter on numpy sum
output:
cmean : a 1d numpy array
the mean
"""
# if sparse matrix, the output is a matrix
# so we need to convert to 1d array
if sp.sparse.isspmatrix(data):
    cmean=data.mean(axis=axis)
    # with axis=None scipy returns a scalar, otherwise a matrix
    if axis is not None:
        cmean=cmean.A.flatten()
else:
    cmean=np.mean(data,axis=axis)
return cmean
def median(data,axis=None):
"""
Get the median of the numpy array/scipy sparse matrix
input:
data : numpy array or scipy matrix
the data to sum
axis : int or None
equivalent to axis parameter on numpy sum
output:
cmedian : a 1d numpy array
the median
"""
# if sparse matrix, the output is a matrix
# so we need to convert to 1d array
if sp.sparse.isspmatrix(data):
Debug(9,'Median not supported for sparse matrix - converting to dense')
cdat=data.todense().A
cmedian=np.median(cdat,axis=axis)
else:
cmedian=np.median(data,axis=axis)
return cmedian
def divvec(data,vec):
"""
divide the data matrix data by the vector vec
works for sparse and non sparse data types
input:
data : sparse matrix or numpy array
the data to divide each column by the vector element
vec : numpy array
the vector to divide by
output:
data : same as data
each element in each column in data divided by the corresponding vec element
"""
if sp.sparse.isspmatrix(data):
numcols=data.shape[1]
b=sp.sparse.lil_matrix( (numcols,numcols) )
for idx in range(numcols):
b[idx,idx]=1.0/vec[idx]
b=b.tocsr()
data=data*b
else:
data=data/vec
return data
def multvec(data,vec):
"""
multiply the data matrix data by the vector vec
works for sparse and non sparse data types
input:
data : sparse matrix or numpy array
the data to multiply each column by the vector element
vec : numpy array
the vector to multiply by
output:
data : same as data
each element in each column in data multiplied by the corresponding vec element
"""
if sp.sparse.isspmatrix(data):
numcols=data.shape[1]
b=sp.sparse.lil_matrix( (numcols,numcols) )
for idx in range(numcols):
b[idx,idx]=vec[idx]
b=b.tocsr()
data=data*b
else:
data=data*vec
return data
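def _example_divvec_multvec():
    """Illustrative sketch: column-wise divide followed by multiply
    round-trips, for dense and sparse inputs alike."""
    dat = np.array([[2.0, 4.0], [6.0, 8.0]])
    vec = np.array([2.0, 4.0])
    assert np.allclose(multvec(divvec(dat, vec), vec), dat)
    sdat = sp.sparse.csr_matrix(dat)
    assert np.allclose(multvec(divvec(sdat, vec), vec).toarray(), dat)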
def log2(data,minthresh=2):
"""
calculate the log2 of the data in dense or sparse format. numbers below minthresh are rounded to minthresh
input:
data : numpy array of sparse matrix
the 2d array to calculate the log2
minthresh: float
the minimal number of reads
(data numbers below minthresh are rounded to minthresh)
output:
ldat : numpy array or sparse matrix
log2 of the data matrix after thresholding. output format is similar to input
"""
if sp.sparse.isspmatrix(data):
    # threshold and log only the explicitly stored values (assumes a
    # csr/csc-style .data attribute); implicit zeros are kept as zeros
    # so the matrix stays sparse
    ldat=data.copy()
    ldat.data[ldat.data<minthresh]=minthresh
    ldat.data=np.log2(ldat.data)
else:
    data[data<minthresh]=minthresh
    ldat=np.log2(data)
return ldat
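def _example_log2():
    """Illustrative sketch: values below minthresh are clipped before taking
    log2 (note the dense input array is modified in place)."""
    dat = np.array([[1.0, 8.0]])
    assert np.allclose(log2(dat, minthresh=2), [[1.0, 3.0]])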
|
|
#!/usr/bin/env python
# $Id: authorizers.py 979 2012-01-23 19:32:22Z g.rodola $
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2012 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""An "authorizer" is a class handling authentications and permissions
of the FTP server. It is used by pyftpdlib.ftpserver.FTPHandler
class for:
- verifying user password
- getting user home directory
- checking user permissions when a filesystem read/write event occurs
- changing user when accessing the filesystem
This module contains two classes which implements such functionalities
in a system-specific way for both Unix and Windows.
"""
__all__ = []
import os
import errno
from pyftpdlib.ftpserver import DummyAuthorizer, AuthorizerError
def replace_anonymous(callable):
"""A decorator to replace anonymous user string passed to authorizer
methods as first argument with the actual user used to handle
anonymous sessions.
"""
def wrapper(self, username, *args, **kwargs):
if username == 'anonymous':
username = self.anonymous_user or username
return callable(self, username, *args, **kwargs)
return wrapper
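# Illustrative sketch (not part of the original module): a decorated method
# sees the configured anonymous_user instead of the literal 'anonymous'
# string, e.g.:
#
# >>> auth = UnixAuthorizer(anonymous_user='ftp')
# >>> auth.get_home_dir('anonymous') # resolved as the home dir of user 'ftp'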
class _Base(object):
"""Methods common to both Unix and Windows authorizers.
Not supposed to be used directly.
"""
def __init__(self):
"""Check for errors in the constructor."""
if self.rejected_users and self.allowed_users:
raise ValueError("rejected_users and allowed_users options are "
"mutually exclusive")
users = self._get_system_users()
for user in (self.allowed_users or self.rejected_users):
if user == 'anonymous':
raise ValueError('invalid username "anonymous"')
if user not in users:
raise ValueError('unknown user %s' % user)
if self.anonymous_user is not None:
if not self.has_user(self.anonymous_user):
raise ValueError('no such user %s' % self.anonymous_user)
home = self.get_home_dir(self.anonymous_user)
if not os.path.isdir(home):
raise ValueError('no valid home set for user %s'
% self.anonymous_user)
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
if not password and not homedir and not perm and not msg_login \
and not msg_quit:
raise ValueError("at least one keyword argument must be specified")
if self.allowed_users and username not in self.allowed_users:
raise ValueError('%s is not an allowed user' % username)
if self.rejected_users and username in self.rejected_users:
raise ValueError('%s is not an allowed user' % username)
if username == "anonymous" and password:
raise ValueError("can't assign password to anonymous user")
if not self.has_user(username):
raise ValueError('no such user %s' % username)
if username in self._dummy_authorizer.user_table:
# re-set parameters
del self._dummy_authorizer.user_table[username]
self._dummy_authorizer.add_user(username, password or "",
homedir or os.getcwd(),
perm or "",
msg_login or "",
msg_quit or "")
if homedir is None:
self._dummy_authorizer.user_table[username]['home'] = ""
def get_msg_login(self, username):
return self._get_key(username, 'msg_login') or self.msg_login
def get_msg_quit(self, username):
return self._get_key(username, 'msg_quit') or self.msg_quit
def get_perms(self, username):
overridden_perms = self._get_key(username, 'perm')
if overridden_perms:
return overridden_perms
if username == 'anonymous':
return 'elr'
return self.global_perm
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
def _get_key(self, username, key):
if self._dummy_authorizer.has_user(username):
return self._dummy_authorizer.user_table[username][key]
def _is_rejected_user(self, username):
"""Return True if the user has been black listed via
allowed_users or rejected_users options.
"""
if self.allowed_users and username not in self.allowed_users:
return True
if self.rejected_users and username in self.rejected_users:
return True
return False
# Note: requires python >= 2.5
try:
import pwd, spwd, crypt
except ImportError:
pass
else:
__all__.extend(['BaseUnixAuthorizer', 'UnixAuthorizer'])
# the uid/gid the server runs under
PROCESS_UID = os.getuid()
PROCESS_GID = os.getgid()
class BaseUnixAuthorizer(object):
"""An authorizer compatible with Unix user account and password
database.
This class should not be used directly unless for subclassing.
Use higher-level UnixAuthorizer class instead.
"""
def __init__(self, anonymous_user=None):
if os.geteuid() != 0 or not spwd.getspall():
raise AuthorizerError("super user privileges are required")
self.anonymous_user = anonymous_user
if self.anonymous_user is not None:
if self.anonymous_user not in self._get_system_users():
raise ValueError('no such user %s' % self.anonymous_user)
try:
pwd.getpwnam(self.anonymous_user).pw_dir
except KeyError:
raise ValueError('no such user %s' % anonymous_user)
# --- overridden / private API
def validate_authentication(self, username, password):
"""Authenticates against shadow password db; return
True on success.
"""
if username == "anonymous":
return self.anonymous_user is not None
try:
pw1 = spwd.getspnam(username).sp_pwd
pw2 = crypt.crypt(password, pw1)
except KeyError: # no such username
return False
else:
return pw1 == pw2
@replace_anonymous
def impersonate_user(self, username, password):
"""Change process effective user/group ids to reflect
logged in user.
"""
try:
pwdstruct = pwd.getpwnam(username)
except KeyError:
raise AuthorizerError('no such user %s' % username)
else:
os.setegid(pwdstruct.pw_gid)
os.seteuid(pwdstruct.pw_uid)
def terminate_impersonation(self, username):
"""Revert process effective user/group IDs."""
os.setegid(PROCESS_GID)
os.seteuid(PROCESS_UID)
@replace_anonymous
def has_user(self, username):
"""Return True if user exists on the Unix system.
If the user has been black listed via allowed_users or
rejected_users options always return False.
"""
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
"""Return user home directory."""
try:
return pwd.getpwnam(username).pw_dir
except KeyError:
raise AuthorizerError('no such user %s' % username)
@staticmethod
def _get_system_users():
"""Return all users defined on the UNIX system."""
return [entry.pw_name for entry in pwd.getpwall()]
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def get_perms(self, username):
return "elradfmw"
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
class UnixAuthorizer(_Base, BaseUnixAuthorizer):
"""A wrapper on top of BaseUnixAuthorizer providing options
to specify what users should be allowed to login, per-user
options, etc.
Example usages:
>>> from pyftpdlib.contrib.authorizers import UnixAuthorizer
>>> # accept all except root
>>> auth = UnixAuthorizer(rejected_users=["root"])
>>>
>>> # accept some users only
>>> auth = UnixAuthorizer(allowed_users=["matt", "jay"])
>>>
>>> # accept everybody and don't require a valid shell
>>> auth = UnixAuthorizer(require_valid_shell=False)
>>>
>>> # set specific options for a user
>>> auth.override_user("matt", password="foo", perm="elr")
"""
# --- public API
def __init__(self, global_perm="elradfmw",
allowed_users=[],
rejected_users=[],
require_valid_shell=True,
anonymous_user=None,
msg_login="Login successful.",
msg_quit="Goodbye."):
"""Parameters:
- (string) global_perm:
a series of letters referencing the users permissions;
defaults to "elradfmw" which means full read and write
access for everybody (except anonymous).
- (list) allowed_users:
a list of users which are accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (list) rejected_users:
a list of users which are not accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (bool) require_valid_shell:
Deny access for those users which do not have a valid shell
binary listed in /etc/shells.
If /etc/shells cannot be found this is a no-op.
Anonymous user is not subject to this option, and is free
to not have a valid shell defined.
Defaults to True (a valid shell is required for login).
- (string) anonymous_user:
specify it if you intend to provide anonymous access.
The value expected is a string representing the system user
to use for managing anonymous sessions; defaults to None
(anonymous access disabled).
- (string) msg_login:
the string sent when client logs in.
- (string) msg_quit:
the string sent when client quits.
"""
BaseUnixAuthorizer.__init__(self, anonymous_user)
self.global_perm = global_perm
self.allowed_users = allowed_users
self.rejected_users = rejected_users
self.anonymous_user = anonymous_user
self.require_valid_shell = require_valid_shell
self.msg_login = msg_login
self.msg_quit = msg_quit
self._dummy_authorizer = DummyAuthorizer()
self._dummy_authorizer._check_permissions('', global_perm)
_Base.__init__(self)
if require_valid_shell:
for username in self.allowed_users:
if not self._has_valid_shell(username):
raise ValueError("user %s has not a valid shell"
% username)
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
if self.require_valid_shell and username != 'anonymous':
if not self._has_valid_shell(username):
raise ValueError("user %s has not a valid shell"
% username)
_Base.override_user(self, username, password, homedir, perm,
msg_login, msg_quit)
# --- overridden / private API
def validate_authentication(self, username, password):
if username == "anonymous":
return self.anonymous_user is not None
if self._is_rejected_user(username):
return False
if self.require_valid_shell and username != 'anonymous':
if not self._has_valid_shell(username):
return False
overridden_password = self._get_key(username, 'pwd')
if overridden_password:
return overridden_password == password
return BaseUnixAuthorizer.validate_authentication(self, username, password)
@replace_anonymous
def has_user(self, username):
if self._is_rejected_user(username):
return False
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
overridden_home = self._get_key(username, 'home')
if overridden_home:
return overridden_home
return BaseUnixAuthorizer.get_home_dir(self, username)
@staticmethod
def _has_valid_shell(username):
"""Return True if the user has a valid shell binary listed
in /etc/shells. If /etc/shells can't be found return True.
"""
file = None
try:
try:
file = open('/etc/shells', 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return True
raise
else:
try:
shell = pwd.getpwnam(username).pw_shell
except KeyError: # invalid user
return False
for line in file:
if line.startswith('#'):
continue
line = line.strip()
if line == shell:
return True
return False
finally:
if file is not None:
file.close()
# Note: requires pywin32 extension
try:
import _winreg
import win32security, win32net, pywintypes, win32con, win32api
except ImportError:
pass
else:
__all__.extend(['BaseWindowsAuthorizer', 'WindowsAuthorizer'])
class BaseWindowsAuthorizer(object):
"""An authorizer compatible with Windows user account and
password database.
This class should not be used directly unless for subclassing.
Use higher-level WindowsAuthorizer class instead.
"""
def __init__(self, anonymous_user=None, anonymous_password=None):
# actually try to impersonate the user
self.anonymous_user = anonymous_user
self.anonymous_password = anonymous_password
if self.anonymous_user is not None:
self.impersonate_user(self.anonymous_user,
self.anonymous_password)
self.terminate_impersonation()
def validate_authentication(self, username, password):
if username == "anonymous":
return self.anonymous_user is not None
try:
win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
except pywintypes.error:
return False
else:
return True
@replace_anonymous
def impersonate_user(self, username, password):
"""Impersonate the security context of another user."""
handler = win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
win32security.ImpersonateLoggedOnUser(handler)
handler.Close()
def terminate_impersonation(self, username):
"""Terminate the impersonation of another user."""
win32security.RevertToSelf()
@replace_anonymous
def has_user(self, username):
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
"""Return the user's profile directory, the closest thing
to a user home directory we have on Windows.
"""
try:
sid = win32security.ConvertSidToStringSid(
win32security.LookupAccountName(None, username)[0])
except pywintypes.error, err:
raise AuthorizerError(err)
path = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + \
"\\" + sid
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError:
raise AuthorizerError("No profile directory defined for user %s"
% username)
value = _winreg.QueryValueEx(key, "ProfileImagePath")[0]
return win32api.ExpandEnvironmentStrings(value)
@classmethod
def _get_system_users(cls):
"""Return all users defined on the Windows system."""
return [entry['name'] for entry in win32net.NetUserEnum(None, 0)[0]]
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def get_perms(self, username):
return "elradfmw"
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
class WindowsAuthorizer(_Base, BaseWindowsAuthorizer):
"""A wrapper on top of BaseWindowsAuthorizer providing options
to specify what users should be allowed to login, per-user
options, etc.
Example usages:
>>> from pyftpdlib.contrib.authorizers import WindowsAuthorizer
>>> # accept all except Administrator
>>> auth = WindowsAuthorizer(rejected_users=["Administrator"])
>>>
>>> # accept some users only
>>> auth = WindowsAuthorizer(allowed_users=["matt", "jay"])
>>>
>>> # set specific options for a user
>>> auth.override_user("matt", password="foo", perm="elr")
"""
# --- public API
def __init__(self, global_perm="elradfmw",
allowed_users=[],
rejected_users=[],
anonymous_user=None,
anonymous_password=None,
msg_login="Login successful.",
msg_quit="Goodbye."):
"""Parameters:
- (string) global_perm:
a series of letters referencing the users permissions;
defaults to "elradfmw" which means full read and write
access for everybody (except anonymous).
- (list) allowed_users:
a list of users which are accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (list) rejected_users:
a list of users which are not accepted for authenticating
against the FTP server; defaults to [] (no restrictions).
- (string) anonymous_user:
specify it if you intend to provide anonymous access.
The value expected is a string representing the system user
to use for managing anonymous sessions.
As for IIS, it is recommended to use Guest account.
The common practice is to first enable the Guest user, which
is disabled by default and then assign an empty password.
Defaults to None (anonymous access disabled).
- (string) anonymous_password:
the password of the user who has been chosen to manage the
anonymous sessions. Defaults to None (empty password).
- (string) msg_login:
the string sent when client logs in.
- (string) msg_quit:
the string sent when client quits.
"""
self.global_perm = global_perm
self.allowed_users = allowed_users
self.rejected_users = rejected_users
self.anonymous_user = anonymous_user
self.anonymous_password = anonymous_password
self.msg_login = msg_login
self.msg_quit = msg_quit
self._dummy_authorizer = DummyAuthorizer()
self._dummy_authorizer._check_permissions('', global_perm)
_Base.__init__(self)
# actually try to impersonate the user
if self.anonymous_user is not None:
self.impersonate_user(self.anonymous_user,
self.anonymous_password)
self.terminate_impersonation()
def override_user(self, username, password=None, homedir=None, perm=None,
msg_login=None, msg_quit=None):
"""Overrides the options specified in the class constructor
for a specific user.
"""
_Base.override_user(self, username, password, homedir, perm,
msg_login, msg_quit)
# --- overridden / private API
def validate_authentication(self, username, password):
"""Authenticates against Windows user database; return
True on success.
"""
if username == "anonymous":
return self.anonymous_user is not None
if self.allowed_users and username not in self.allowed_users:
return False
if self.rejected_users and username in self.rejected_users:
return False
overridden_password = self._get_key(username, 'pwd')
if overridden_password:
return overridden_password == password
else:
return BaseWindowsAuthorizer.validate_authentication(self,
username, password)
def impersonate_user(self, username, password):
"""Impersonate the security context of another user."""
if username == "anonymous":
username = self.anonymous_user or ""
password = self.anonymous_password or ""
return BaseWindowsAuthorizer.impersonate_user(self, username, password)
@replace_anonymous
def has_user(self, username):
if self._is_rejected_user(username):
return False
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
overridden_home = self._get_key(username, 'home')
if overridden_home:
return overridden_home
return BaseWindowsAuthorizer.get_home_dir(self, username)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
from __future__ import print_function
import os
import sphinx.environment
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ['image.nonlocal_uri']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
graphviz_output_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invenio'
copyright = u'2015-2020, CERN'
author = u'CERN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('..', 'invenio', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
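# The exec() trick above assumes that ../invenio/version.py defines a plain
# module-level string, e.g.:
#
#     __version__ = "1.0.0"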
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'logo': 'logo-full.png',
'description': 'Invenio Digital Library Framework.',
'github_user': 'inveniosoftware',
'github_repo': 'invenio',
'github_button': False,
'github_banner': True,
'show_powered_by': False,
'sidebar_collapse': True,
'show_relbar_bottom': True,
'extra_nav_links': {
'invenio@GitHub': 'https://github.com/inveniosoftware/invenio',
'invenio@PyPI': 'https://pypi.python.org/pypi/invenio/',
}
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'invenio.tex', u'invenio Documentation',
u'CERN', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invenio', u'invenio Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'invenio', u'Invenio Documentation',
author, 'invenio', 'Invenio Digital Library Framework.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'flask': ('https://flask.palletsprojects.com/en/1.1.x/', None),
'flaskassets': ('https://flask-assets.readthedocs.io/en/latest/', None),
'flaskregistry': (
'https://flask-registry.readthedocs.io/en/latest/', None),
'flaskscript': ('https://flask-script.readthedocs.io/en/latest/', None),
'invenio-app': (
'https://invenio-app.readthedocs.io/en/latest/', None),
'invenio-access': (
'https://invenio-access.readthedocs.io/en/latest/', None),
'invenio-celery': (
'https://invenio-celery.readthedocs.io/en/latest/', None),
'invenio-config': (
'https://invenio-config.readthedocs.io/en/latest/', None),
'invenio-db': (
'https://invenio-db.readthedocs.io/en/latest/', None),
'invenio-formatter': (
'https://invenio-formatter.readthedocs.io/en/latest/', None),
'invenio-indexer': (
'https://invenio-indexer.readthedocs.io/en/latest/', None),
'invenio-files-rest': (
'https://invenio-files-rest.readthedocs.io/en/latest/', None),
'invenio-previewer': (
'https://invenio-previewer.readthedocs.io/en/latest/', None),
'invenio-iiif': (
'https://invenio-iiif.readthedocs.io/en/latest/', None),
'invenio-records-files': (
'https://invenio-records-files.readthedocs.io/en/latest/', None),
'invenio-records-rest': (
'https://invenio-records-rest.readthedocs.io/en/latest/', None),
'invenio-theme': (
'https://invenio-theme.readthedocs.io/en/latest/', None),
'jinja': ('https://jinja.palletsprojects.com/en/2.10.x/', None),
'python': ('https://docs.python.org/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
'webassets': ('https://webassets.readthedocs.io/en/latest/', None),
'werkzeug': ('https://werkzeug.palletsprojects.com/en/0.16.x/', None),
}
# Autodoc configuration.
autoclass_content = 'both'
|
|
#!/usr/bin/python
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
import re
from hashlib import md5
try:
import boto3
import boto # seems to be needed for ansible.module_utils
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as S3 bucket
events, DynamoDB and Kinesis streaming events via the Ansible framework.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.1"
author: Pierre Jodouin (@pjodouin)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state and defaults to "present".
required: false
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: false
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: true
choices: ['s3', 'Kinesis', 'DynamoDB', 'SNS']
source_params:
description:
- Sub-parameters required for event source.
- I(== S3 event source ==)
- C(id) Unique ID for this source event.
- C(bucket) Name of source bucket.
- C(prefix) Bucket prefix (e.g. images/)
- C(suffix) Bucket suffix (e.g. log)
- C(events) List of events (e.g. ['s3:ObjectCreated:Put'])
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
- I(== SNS event source ==)
- C(id) Unique ID for this source event.
- C(topic_arn) The ARN of the topic to which you want to subscribe the lambda function.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example that creates a lambda event notification for an S3 bucket
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: S3 event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: s3
function_name: ingestData
alias: Dev
source_params:
id: lambda-s3-myBucket-create-data-log
bucket: buzz-scanner
prefix: twitter
suffix: log
events:
- s3:ObjectCreated:Put
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: show source event
debug: var=lambda_stream_events
# example of SNS topic
- name: SNS event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: sns
function_name: SaveMessage
alias: Prod
source_params:
id: lambda-sns-topic-notify
topic_arn: arn:aws:sns:us-east-1:123456789012:sns-some-topic
- name: show SNS event mapping
debug: var=lambda_sns_event
'''
RETURN = '''
---
lambda_s3_events:
description: list of dictionaries returned by the API describing S3 event mappings
returned: success
type: list
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
lambda_sns_event:
description: dictionary returned by the API describing SNS event mapping
returned: success
type: dict
'''
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
class AWSConnection:
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3=True):
try:
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
self.resource_client = dict()
if not resources:
resources = ['lambda']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['lambda'].meta.region_name
except (ClientError, ParamValidationError, MissingParametersError) as e:
ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
# set account ID
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='lambda'):
return self.resource_client[resource]
def pc(key):
"""
Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def ordered_obj(obj):
"""
Order object for comparison purposes
:param obj:
:return:
"""
if isinstance(obj, dict):
return sorted((k, ordered_obj(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered_obj(x) for x in obj)
else:
return obj
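# Illustrative sketch (not part of the original module): ordered_obj() lets two
# notification configurations be compared regardless of key or list ordering,
# which is how unchanged S3 configurations are detected further below.
def _ordered_obj_example():
    a = dict(Id='x', Events=['s3:ObjectCreated:Put', 's3:ObjectRemoved:*'])
    b = dict(Events=['s3:ObjectRemoved:*', 's3:ObjectCreated:Put'], Id='x')
    return ordered_obj(a) == ordered_obj(b)  # True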
def set_api_sub_params(params):
"""
Sets module sub-parameters to those expected by the boto3 API.
:param params:
:return:
"""
api_params = dict()
for param in params.keys():
param_value = params.get(param, None)
if param_value:
api_params[pc(param)] = param_value
return api_params
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
function_name = module.params['lambda_function_arn']
# validate function name
if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
)
if ':' in function_name:
if len(function_name) > 140:
module.fail_json(msg='Function ARN "{0}" exceeds 140 character limit'.format(function_name))
else:
if len(function_name) > 64:
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
# check if 'function_name' needs to be expanded into full ARN format
if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
function_name = module.params['lambda_function_arn']
module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
qualifier = get_qualifier(module)
if qualifier:
function_arn = module.params['lambda_function_arn']
module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
return
def get_qualifier(module):
"""
Returns the function qualifier as a version or alias or None.
:param module:
:return:
"""
qualifier = None
if module.params['version'] > 0:
qualifier = str(module.params['version'])
elif module.params['alias']:
qualifier = str(module.params['alias'])
return qualifier
def assert_policy_state(module, aws, policy, present=False):
"""
Asserts the desired policy statement is present/absent and adds/removes it accordingly.
:param module:
:param aws:
:param policy:
:param present:
:return:
"""
changed = False
currently_present = get_policy_state(module, aws, policy['statement_id'])
if present:
if not currently_present:
changed = add_policy_permission(module, aws, policy)
else:
if currently_present:
changed = remove_policy_permission(module, aws, policy['statement_id'])
return changed
def get_policy_state(module, aws, sid):
"""
Checks whether the function policy exists and, if so, whether the given statement ID is present.
:param module:
:param aws:
:param sid:
:return:
"""
client = aws.client('lambda')
policy = dict()
present = False
# set API parameters
api_params = dict(FunctionName=module.params['lambda_function_arn'])
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
# check if function policy exists
try:
# get_policy returns a JSON string so must convert to dict before reassigning to its key
policy_results = client.get_policy(**api_params)
policy = json.loads(policy_results.get('Policy', '{}'))
except (ClientError, ParamValidationError, MissingParametersError) as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
module.fail_json(msg='Error retrieving function policy: {0}'.format(e))
if 'Statement' in policy:
# now that we have the policy, check if required permission statement is present
for statement in policy['Statement']:
if statement['Sid'] == sid:
present = True
break
return present
def add_policy_permission(module, aws, policy_statement):
"""
Adds a permission statement to the policy.
:param module:
:param aws:
:param policy_statement:
:return:
"""
client = aws.client('lambda')
changed = False
# set API parameters
api_params = dict(FunctionName=module.params['lambda_function_arn'])
api_params.update(set_api_sub_params(policy_statement))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
client.add_permission(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error adding permission to policy: {0}'.format(e))
return changed
def remove_policy_permission(module, aws, statement_id):
"""
Removes a permission statement from the policy.
:param module:
:param aws:
:param statement_id:
:return:
"""
client = aws.client('lambda')
changed = False
# set API parameters
api_params = dict(FunctionName=module.params['lambda_function_arn'])
api_params.update(StatementId=statement_id)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
client.remove_permission(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing permission from policy: {0}'.format(e))
return changed
# ---------------------------------------------------------------------------------------------------
#
# Lambda Event Handlers
#
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
# the execution of a Lambda function.
#
# ---------------------------------------------------------------------------------------------------
def lambda_event_stream(module, aws):
"""
Adds, updates or deletes lambda stream (DynamoDB, Kinesis) event notifications.
:param module:
:param aws:
:return:
"""
client = aws.client('lambda')
facts = dict()
changed = False
current_state = 'absent'
state = module.params['state']
api_params = dict(FunctionName=module.params['lambda_function_arn'])
# check if required sub-parameters are present and valid
source_params = module.params['source_params']
source_arn = source_params.get('source_arn')
if source_arn:
api_params.update(EventSourceArn=source_arn)
else:
module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
# check if optional sub-parameters are valid, if present
batch_size = source_params.get('batch_size')
if batch_size:
try:
source_params['batch_size'] = int(batch_size)
except ValueError:
module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
# optional boolean value needs special treatment as not present does not imply False
source_param_enabled = None
if source_params.get('enabled') is not None:
source_param_enabled = module.boolean(source_params['enabled'])
# check if the event mapping exists
try:
facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
if facts:
current_state = 'present'
except ClientError as e:
module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
if state == 'present':
if current_state == 'absent':
starting_position = source_params.get('starting_position')
if starting_position:
api_params.update(StartingPosition=starting_position)
else:
module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
if source_param_enabled is not None:
api_params.update(Enabled=source_param_enabled)
batch_size = source_params.get('batch_size')
if batch_size:
api_params.update(BatchSize=batch_size)
try:
if not module.check_mode:
facts = client.create_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
else:
# current_state is 'present'
api_params = dict(FunctionName=module.params['lambda_function_arn'])
current_mapping = facts[0]
api_params.update(UUID=current_mapping['UUID'])
mapping_changed = False
# check if anything changed
if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
api_params.update(BatchSize=source_params['batch_size'])
mapping_changed = True
if source_param_enabled is not None:
if source_param_enabled:
if current_mapping['State'] not in ('Enabled', 'Enabling'):
api_params.update(Enabled=True)
mapping_changed = True
else:
if current_mapping['State'] not in ('Disabled', 'Disabling'):
api_params.update(Enabled=False)
mapping_changed = True
if mapping_changed:
try:
if not module.check_mode:
facts = client.update_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
else:
if current_state == 'present':
# remove the stream event mapping
api_params = dict(UUID=facts[0]['UUID'])
try:
if not module.check_mode:
facts = client.delete_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
return dict(changed=changed, ansible_facts=dict(lambda_stream_events=facts))
def lambda_event_s3(module, aws):
"""
Adds, updates or deletes lambda s3 event notifications.
:param module: Ansible module reference
:param aws:
:return dict:
"""
client = aws.client('s3')
api_params = dict()
changed = False
current_state = 'absent'
state = module.params['state']
# check if required sub-parameters are present
source_params = module.params['source_params']
if not source_params.get('id'):
module.fail_json(msg="Source parameter 'id' is required for S3 event notification.")
if source_params.get('bucket'):
api_params = dict(Bucket=source_params['bucket'])
else:
module.fail_json(msg="Source parameter 'bucket' is required for S3 event notification.")
# check if event notifications exist
try:
facts = client.get_bucket_notification_configuration(**api_params)
facts.pop('ResponseMetadata')
except ClientError as e:
module.fail_json(msg='Error retrieving s3 event notification configuration: {0}'.format(e))
current_lambda_configs = list()
matching_id_config = dict()
if 'LambdaFunctionConfigurations' in facts:
current_lambda_configs = facts.pop('LambdaFunctionConfigurations')
for config in current_lambda_configs:
if config['Id'] == source_params['id']:
matching_id_config = config
current_lambda_configs.remove(config)
current_state = 'present'
break
if state == 'present':
# build configurations
new_configuration = dict(Id=source_params.get('id'))
new_configuration.update(LambdaFunctionArn=module.params['lambda_function_arn'])
filter_rules = []
if source_params.get('prefix'):
filter_rules.append(dict(Name='Prefix', Value=str(source_params.get('prefix'))))
if source_params.get('suffix'):
filter_rules.append(dict(Name='Suffix', Value=str(source_params.get('suffix'))))
if filter_rules:
new_configuration.update(Filter=dict(Key=dict(FilterRules=filter_rules)))
if source_params.get('events'):
new_configuration.update(Events=source_params['events'])
if current_state == 'present':
# check if source event configuration has changed
if ordered_obj(matching_id_config) == ordered_obj(new_configuration):
current_lambda_configs.append(matching_id_config)
else:
# update s3 event notification for lambda
current_lambda_configs.append(new_configuration)
facts.update(LambdaFunctionConfigurations=current_lambda_configs)
api_params = dict(NotificationConfiguration=facts, Bucket=source_params['bucket'])
try:
if not module.check_mode:
client.put_bucket_notification_configuration(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating s3 event notification for lambda: {0}'.format(e))
else:
# add policy permission before creating the event notification
policy = dict(
statement_id=source_params['id'],
action='lambda:InvokeFunction',
principal='s3.amazonaws.com',
source_arn='arn:aws:s3:::{0}'.format(source_params['bucket']),
source_account=aws.account_id,
)
assert_policy_state(module, aws, policy, present=True)
# create s3 event notification for lambda
current_lambda_configs.append(new_configuration)
facts.update(LambdaFunctionConfigurations=current_lambda_configs)
api_params = dict(NotificationConfiguration=facts, Bucket=source_params['bucket'])
try:
if not module.check_mode:
client.put_bucket_notification_configuration(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating s3 event notification for lambda: {0}'.format(e))
else:
# state = 'absent'
if current_state == 'present':
# delete the lambda event notifications
if current_lambda_configs:
facts.update(LambdaFunctionConfigurations=current_lambda_configs)
api_params.update(NotificationConfiguration=facts)
try:
if not module.check_mode:
client.put_bucket_notification_configuration(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing s3 source event configuration: {0}'.format(e))
policy = dict(
statement_id=source_params['id'],
)
assert_policy_state(module, aws, policy, present=False)
return dict(changed=changed, ansible_facts=dict(lambda_s3_events=current_lambda_configs))
def lambda_event_sns(module, aws):
"""
Adds, updates or deletes lambda sns event notifications.
:param module: Ansible module reference
:param aws:
:return dict:
"""
client = aws.client('sns')
api_params = dict()
changed = False
current_state = 'absent'
state = module.params['state']
# check if required sub-parameters are present
source_params = module.params['source_params']
if not source_params.get('id'):
module.fail_json(msg="Source parameter 'id' is required for SNS event.")
if source_params.get('topic_arn'):
api_params = dict(TopicArn=source_params['topic_arn'])
else:
module.fail_json(msg="Source parameter 'topic_arn' is required for SNS event.")
# check if SNS subscription exists
current_subscription = dict()
endpoint = module.params['lambda_function_arn']
try:
while not current_subscription:
facts = client.list_subscriptions_by_topic(**api_params)
for subscription in facts.get('Subscriptions', []):
if subscription['Endpoint'] == endpoint:
current_subscription = subscription
current_state = 'present'
break
# if there are more than 100 subscriptions, NextToken will be present so if
# subscription is not found yet, get next block starting at NextToken
if 'NextToken' in facts and not current_subscription:
api_params.update(NextToken=facts['NextToken'])
else:
break
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error retrieving SNS subscriptions: {0}'.format(e))
if state == 'present':
if current_state == 'present':
# subscription cannot be updated so nothing to do here
pass
else:
# add policy permission before creating the subscription
policy = dict(
statement_id=source_params['id'],
action='lambda:InvokeFunction',
principal='sns.amazonaws.com',
source_arn=source_params['topic_arn'],
)
assert_policy_state(module, aws, policy, present=True)
# create subscription
api_params = dict(
TopicArn=source_params['topic_arn'],
Endpoint=endpoint,
Protocol='lambda'
)
try:
if not module.check_mode:
current_subscription = client.subscribe(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating SNS event mapping for lambda: {0}'.format(e))
else:
if current_state == 'present':
# remove subscription
api_params = dict(SubscriptionArn=current_subscription['SubscriptionArn'])
try:
if not module.check_mode:
client.unsubscribe(**api_params)
current_subscription = dict()
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing SNS event mapping for lambda: {0}'.format(e))
# remove policy associated with this event mapping
policy = dict(
statement_id=source_params['id'],
)
assert_policy_state(module, aws, policy, present=False)
return dict(changed=changed, ansible_facts=dict(lambda_sns_event=current_subscription))
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: ansible facts
"""
# produce a list of function suffixes which handle lambda events.
this_module = sys.modules[__name__]
source_choices = [function.split('_')[-1] for function in dir(this_module) if function.startswith('lambda_event')]
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
event_source=dict(required=True, default=None, choices=source_choices),
source_params=dict(type='dict', required=True, default=None),
alias=dict(required=False, default=None),
version=dict(type='int', required=False, default=0),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['alias', 'version']],
required_together=[]
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['lambda', 's3', 'sns'])
validate_params(module, aws)
this_module_function = getattr(this_module, 'lambda_event_{0}'.format(module.params['event_source'].lower()))
results = this_module_function(module, aws)
module.exit_json(**results)
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
|
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Type
from typing import Union
from flask import current_app
from flask.json import JSONEncoder
from jwt.algorithms import requires_cryptography
from flask_jwt_extended.typing import ExpiresDelta
class _Config(object):
"""
Helper object for accessing and verifying options in this extension. This
is meant for internal use of the application; modifying config options
should be done with Flask's ``app.config``.
Default values for the configuration options are set in the jwt_manager
object. All of these values are read only. This is simply a loose wrapper
with some helper functionality for Flask's ``app.config``.
"""
@property
def is_asymmetric(self) -> bool:
return self.algorithm in requires_cryptography
@property
def encode_key(self) -> str:
return self._private_key if self.is_asymmetric else self._secret_key
@property
def decode_key(self) -> str:
return self._public_key if self.is_asymmetric else self._secret_key
@property
def token_location(self) -> Sequence[str]:
locations = current_app.config["JWT_TOKEN_LOCATION"]
if isinstance(locations, str):
locations = (locations,)
elif not isinstance(locations, Iterable):
raise RuntimeError("JWT_TOKEN_LOCATION must be a sequence or a set")
elif not locations:
raise RuntimeError(
"JWT_TOKEN_LOCATION must contain at least one "
'of "headers", "cookies", "query_string", or "json"'
)
for location in locations:
if location not in ("headers", "cookies", "query_string", "json"):
raise RuntimeError(
"JWT_TOKEN_LOCATION can only contain "
'"headers", "cookies", "query_string", or "json"'
)
return locations
@property
def jwt_in_cookies(self) -> bool:
return "cookies" in self.token_location
@property
def jwt_in_headers(self) -> bool:
return "headers" in self.token_location
@property
def jwt_in_query_string(self) -> bool:
return "query_string" in self.token_location
@property
def jwt_in_json(self) -> bool:
return "json" in self.token_location
@property
def header_name(self) -> str:
name = current_app.config["JWT_HEADER_NAME"]
if not name:
raise RuntimeError("JWT_ACCESS_HEADER_NAME cannot be empty")
return name
@property
def header_type(self) -> str:
return current_app.config["JWT_HEADER_TYPE"]
@property
def query_string_name(self) -> str:
return current_app.config["JWT_QUERY_STRING_NAME"]
@property
def query_string_value_prefix(self) -> str:
return current_app.config["JWT_QUERY_STRING_VALUE_PREFIX"]
@property
def access_cookie_name(self) -> str:
return current_app.config["JWT_ACCESS_COOKIE_NAME"]
@property
def refresh_cookie_name(self) -> str:
return current_app.config["JWT_REFRESH_COOKIE_NAME"]
@property
def access_cookie_path(self) -> str:
return current_app.config["JWT_ACCESS_COOKIE_PATH"]
@property
def refresh_cookie_path(self) -> str:
return current_app.config["JWT_REFRESH_COOKIE_PATH"]
@property
def cookie_secure(self) -> bool:
return current_app.config["JWT_COOKIE_SECURE"]
@property
def cookie_domain(self) -> str:
return current_app.config["JWT_COOKIE_DOMAIN"]
@property
def session_cookie(self) -> bool:
return current_app.config["JWT_SESSION_COOKIE"]
@property
def cookie_samesite(self) -> str:
return current_app.config["JWT_COOKIE_SAMESITE"]
@property
def json_key(self) -> str:
return current_app.config["JWT_JSON_KEY"]
@property
def refresh_json_key(self) -> str:
return current_app.config["JWT_REFRESH_JSON_KEY"]
@property
def csrf_protect(self) -> bool:
return self.jwt_in_cookies and current_app.config["JWT_COOKIE_CSRF_PROTECT"]
@property
def csrf_request_methods(self) -> Iterable[str]:
return current_app.config["JWT_CSRF_METHODS"]
@property
def csrf_in_cookies(self) -> bool:
return current_app.config["JWT_CSRF_IN_COOKIES"]
@property
def access_csrf_cookie_name(self) -> str:
return current_app.config["JWT_ACCESS_CSRF_COOKIE_NAME"]
@property
def refresh_csrf_cookie_name(self) -> str:
return current_app.config["JWT_REFRESH_CSRF_COOKIE_NAME"]
@property
def access_csrf_cookie_path(self) -> str:
return current_app.config["JWT_ACCESS_CSRF_COOKIE_PATH"]
@property
def refresh_csrf_cookie_path(self) -> str:
return current_app.config["JWT_REFRESH_CSRF_COOKIE_PATH"]
@property
def access_csrf_header_name(self) -> str:
return current_app.config["JWT_ACCESS_CSRF_HEADER_NAME"]
@property
def refresh_csrf_header_name(self) -> str:
return current_app.config["JWT_REFRESH_CSRF_HEADER_NAME"]
@property
def csrf_check_form(self) -> bool:
return current_app.config["JWT_CSRF_CHECK_FORM"]
@property
def access_csrf_field_name(self) -> str:
return current_app.config["JWT_ACCESS_CSRF_FIELD_NAME"]
@property
def refresh_csrf_field_name(self) -> str:
return current_app.config["JWT_REFRESH_CSRF_FIELD_NAME"]
@property
def access_expires(self) -> ExpiresDelta:
delta = current_app.config["JWT_ACCESS_TOKEN_EXPIRES"]
if type(delta) is int:
delta = timedelta(seconds=delta)
if delta is not False:
try:
# Basically runtime typechecking. Probably a better way to do
# this with proper type checking
delta + datetime.now(timezone.utc)
except TypeError as e:
err = (
"must be able to add JWT_ACCESS_TOKEN_EXPIRES to datetime.datetime"
)
raise RuntimeError(err) from e
return delta
@property
def refresh_expires(self) -> ExpiresDelta:
delta = current_app.config["JWT_REFRESH_TOKEN_EXPIRES"]
if type(delta) is int:
delta = timedelta(seconds=delta)
if delta is not False:
# Basically runtime typechecking. Probably a better way to do
# this with proper type checking
try:
delta + datetime.now(timezone.utc)
except TypeError as e:
err = (
"must be able to add JWT_REFRESH_TOKEN_EXPIRES to datetime.datetime"
)
raise RuntimeError(err) from e
return delta
@property
def algorithm(self) -> str:
return current_app.config["JWT_ALGORITHM"]
@property
def decode_algorithms(self) -> List[str]:
algorithms = current_app.config["JWT_DECODE_ALGORITHMS"]
if not algorithms:
return [self.algorithm]
if self.algorithm not in algorithms:
algorithms.append(self.algorithm)
return algorithms
@property
def _secret_key(self) -> str:
key = current_app.config["JWT_SECRET_KEY"]
if not key:
key = current_app.config.get("SECRET_KEY", None)
if not key:
raise RuntimeError(
"JWT_SECRET_KEY or flask SECRET_KEY "
"must be set when using symmetric "
'algorithm "{}"'.format(self.algorithm)
)
return key
@property
def _public_key(self) -> str:
key = current_app.config["JWT_PUBLIC_KEY"]
if not key:
raise RuntimeError(
"JWT_PUBLIC_KEY must be set to use "
"asymmetric cryptography algorithm "
'"{}"'.format(self.algorithm)
)
return key
@property
def _private_key(self) -> str:
key = current_app.config["JWT_PRIVATE_KEY"]
if not key:
raise RuntimeError(
"JWT_PRIVATE_KEY must be set to use "
"asymmetric cryptography algorithm "
'"{}"'.format(self.algorithm)
)
return key
@property
def cookie_max_age(self) -> Optional[int]:
# Returns the appropriate value of max_age for Flask's set_cookie. If
# session cookie is true, return None, otherwise return a number of
# seconds roughly one year in the future.
return None if self.session_cookie else 31540000  # ~1 year
@property
def identity_claim_key(self) -> str:
return current_app.config["JWT_IDENTITY_CLAIM"]
@property
def exempt_methods(self) -> Iterable[str]:
return {"OPTIONS"}
@property
def error_msg_key(self) -> str:
return current_app.config["JWT_ERROR_MESSAGE_KEY"]
@property
def json_encoder(self) -> Type[JSONEncoder]:
return current_app.json_encoder
@property
def decode_audience(self) -> Union[str, Iterable[str]]:
return current_app.config["JWT_DECODE_AUDIENCE"]
@property
def encode_audience(self) -> Union[str, Iterable[str]]:
return current_app.config["JWT_ENCODE_AUDIENCE"]
@property
def encode_issuer(self) -> str:
return current_app.config["JWT_ENCODE_ISSUER"]
@property
def decode_issuer(self) -> str:
return current_app.config["JWT_DECODE_ISSUER"]
@property
def leeway(self) -> int:
return current_app.config["JWT_DECODE_LEEWAY"]
@property
def encode_nbf(self) -> bool:
return current_app.config["JWT_ENCODE_NBF"]
config = _Config()
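# Illustrative usage sketch (not part of the extension): within an application
# context the properties above simply proxy Flask's app.config, e.g.
#
#   app.config["JWT_TOKEN_LOCATION"] = ["headers", "cookies"]
#   with app.app_context():
#       assert config.jwt_in_headers and config.jwt_in_cookies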
|
|
"""
basic syntax of the parameter file is:
# simple parameter file
[driver]
nsteps = 100 ; comment
max_time = 0.25
[riemann]
tol = 1.e-10
max_iter = 10
[io]
basename = myfile_
The recommended way to use this is for the code to have a master list
of parameters and their defaults (e.g. _defaults), and then the
user can override these defaults at runtime through an inputs file.
These two files have the same format.
The calling sequence would then be:
runparams.LoadParams("_defaults")
runparams.LoadParams("inputs")
The parser will determine what datatype the parameter is (string,
integer, float), and store it in a global dictionary (globalParams).
If a parameter that already exists is encountered a second time (e.g.,
there is a default value in _defaults and the user specifies a new
value in inputs), then the second instance replaces the first.
Runtime parameters can then be accessed via any module through the
getParam method:
tol = runparams.getParam('riemann.tol')
An earlier version of this was based on the Python Cookbook, 4.11, but
we no longer use the ConfigParser module; instead we roll our own regex parser.
If the optional flag noNew=1 is set, then the LoadParams function will
not define any new parameters, but only overwrite existing ones. This
is useful for reading in an inputs file that overrides previously read
default values.
"""
import string
import re
from util import msg
# we will keep track of the parameters and their comments globally
globalParams = {}
globalParamComments = {}
# for debugging -- keep track of which parameters were actually looked-
# up
usedParams = []
# some utility functions to automagically determine what the data
# types are
def isInt(string):
""" is the given string an interger? """
try: int(string)
except ValueError: return 0
else: return 1
def isFloat(string):
""" is the given string a float? """
try: float(string)
except ValueError: return 0
else: return 1
def LoadParams(file, noNew=0):
"""
reads lines from file and makes dictionary pairs from the data
to store in globalParams.
"""
global globalParams
# check to see whether the file exists
try: f = open(file, 'r')
except IOError:
msg.fail("ERROR: parameter file does not exist: %s" % (file))
# we could use the ConfigParser, but we actually want to have
# our configuration files be self-documenting, of the format
# key = value ; comment
sec = re.compile(r'^\[(.*)\]')
eq = re.compile(r'^([^=#]+)=([^;]+);{0,1}(.*)')
for line in f.readlines():
if sec.search(line):
lbracket, section, rbracket = sec.split(line)
section = string.lower(section.strip())
elif eq.search(line):
left, item, value, comment, right = eq.split(line)
item = string.lower(item.strip())
# define the key
key = section + "." + item
# if we have noNew = 1, then we only want to override existing
# key/values
if (noNew):
if (not key in globalParams.keys()):
msg.warning("warning, key: %s not defined" % (key))
continue
# check in turn whether this is an integer, float, or string
if (isInt(value)):
globalParams[key] = int(value)
elif (isFloat(value)):
globalParams[key] = float(value)
else:
globalParams[key] = value.strip()
# if the comment already exists (i.e. from reading in _defaults)
# and we are just resetting the value of the parameter (i.e.
# from reading in inputs), then we don't want to destroy the
# comment
if comment.strip() == "":
try:
comment = globalParamComments[key]
except KeyError:
comment = ""
globalParamComments[key] = comment.strip()
def CommandLineParams(cmdStrings):
"""
finds dictionary pairs from a string that came from the
commandline. Stores the parameters in globalParams only if they
already exist.
"""
global globalParams
# we expect things in the string in the form:
# ["sec.opt=value", "sec.opt=value"]
# with each opt an element in the list
for item in cmdStrings:
# break it apart
key, value = item.split("=")
# we only want to override existing keys/values
if (not key in globalParams.keys()):
msg.warning("warning, key: %s not defined" % (key))
continue
# check in turn whether this is an integer, float, or string
if (isInt(value)):
globalParams[key] = int(value)
elif (isFloat(value)):
globalParams[key] = float(value)
else:
globalParams[key] = value.strip()
def getParam(key):
"""
returns the value of the runtime parameter corresponding to the
input key
"""
if globalParams == {}:
msg.warning("WARNING: runtime parameters not yet initialized")
LoadParams("_defaults")
# debugging
if not key in usedParams:
usedParams.append(key)
if key in globalParams.keys():
return globalParams[key]
else:
msg.fail("ERROR: runtime parameter %s not found" % (key))
def printUnusedParams():
"""
print out the list of parameters that were defined but never used
"""
for key in globalParams.keys():
if not key in usedParams:
msg.warning("parameter %s never used" % (key))
def PrintAllParams():
keys = globalParams.keys()
keys.sort()
for key in keys:
print key, "=", globalParams[key]
print " "
def PrintParamFile():
keys = globalParams.keys()
keys.sort()
try: f = open('inputs.auto', 'w')
except IOError:
msg.fail("ERROR: unable to open inputs.auto")
f.write('# automagically generated parameter file\n')
currentSection = " "
for key in keys:
parts = string.split(key, '.')
section = parts[0]
option = parts[1]
if (section != currentSection):
currentSection = section
f.write('\n')
f.write('[' + section + ']\n')
if (isinstance(globalParams[key], int)):
value = '%d' % globalParams[key]
elif (isinstance(globalParams[key], float)):
value = '%f' % globalParams[key]
else:
value = globalParams[key]
if (globalParamComments[key] != ''):
f.write(option + ' = ' + value + ' ; ' + globalParamComments[key] + '\n')
else:
f.write(option + ' = ' + value + '\n')
f.close()
if __name__== "__main__":
LoadParams("inputs.test")
PrintParamFile()
|
|
import logging
from archinfo.arch_soot import ArchSoot, SootAddressDescriptor
from .sim_state import SimState
from .calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg, PointerWrapper
from .callable import Callable
from .errors import AngrAssemblyError
from .engines import UberEngine, ProcedureEngine, SimEngineConcrete
l = logging.getLogger(name=__name__)
class AngrObjectFactory(object):
"""
This factory provides access to important analysis elements.
"""
def __init__(self, project, default_engine=None):
if default_engine is None:
default_engine = UberEngine
self.project = project
self._default_cc = DEFAULT_CC[project.arch.name]
self.default_engine = default_engine(project)
self.procedure_engine = ProcedureEngine(project)
if project.concrete_target:
self.concrete_engine = SimEngineConcrete(project)
else:
self.concrete_engine = None
def snippet(self, addr, jumpkind=None, **block_opts):
if self.project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
hook = self.project._sim_procedures[addr]
size = hook.kwargs.get('length', 0)
return HookNode(addr, size, self.project.hooked_by(addr))
elif self.project.simos.is_syscall_addr(addr):
syscall = self.project.simos.syscall_from_addr(addr)
size = syscall.kwargs.get('length', 0)
return SyscallNode(addr, size, syscall)
else:
return self.block(addr, **block_opts).codenode # pylint: disable=no-member
def successors(self, *args, engine=None, **kwargs):
"""
Perform execution using an engine. Generally, return a SimSuccessors object classifying the results of the run.
:param state: The state to analyze
:param engine: The engine to use. If not provided, will use the project default.
:param addr: optional, an address to execute at instead of the state's ip
:param jumpkind: optional, the jumpkind of the previous exit
:param inline: This is an inline execution. Do not bother copying the state.
Additional keyword arguments will be passed directly into each engine's process method.
"""
if engine is not None:
return engine.process(*args, **kwargs)
return self.default_engine.process(*args, **kwargs)
def blank_state(self, **kwargs):
"""
Returns a mostly-uninitialized state object. All parameters are optional.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: A dictionary of file names with associated preset SimFile objects.
:param concrete_fs: bool describing whether the host filesystem should be consulted when opening files.
:param chroot: A path to use as a fake root directory. Behaves similarly to a real chroot. Used only
when concrete_fs is set to True.
:param kwargs: Any additional keyword args will be passed to the SimState constructor.
:return: The blank state.
:rtype: SimState
"""
return self.project.simos.state_blank(**kwargs)
def entry_state(self, **kwargs):
"""
Returns a state object representing the program at its entry point. All parameters are optional.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: a dictionary of file names with associated preset SimFile objects.
:param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
:param chroot: a path to use as a fake root directory; behaves similarly to a real chroot. Used only when
concrete_fs is set to True.
:param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
not provided, defaults to the length of args.
:param args: a list of values to use as the program's argv. May be mixed strings and bitvectors.
:param env: a dictionary to use as the environment for the program. Both keys and values may be
mixed strings and bitvectors.
:return: The entry state.
:rtype: SimState
"""
return self.project.simos.state_entry(**kwargs)
def full_init_state(self, **kwargs):
"""
Very much like :meth:`entry_state()`, except that instead of starting execution at the program entry point,
execution begins at a special SimProcedure that plays the role of the dynamic loader, calling each of the
initializer functions that should be called before execution reaches the entry point.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: a dictionary of file names with associated preset SimFile objects.
:param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
:param chroot: a path to use as a fake root directory; behaves similarly to a real chroot. Used only when
concrete_fs is set to True.
:param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
not provided, defaults to the length of args.
:param args: a list of values to use as arguments to the program. May be mixed strings and bitvectors.
:param env: a dictionary to use as the environment for the program. Both keys and values may be
mixed strings and bitvectors.
:return: The fully initialized state.
:rtype: SimState
"""
return self.project.simos.state_full_init(**kwargs)
def call_state(self, addr, *args, **kwargs):
"""
Returns a state object initialized to the start of a given function, as if it were called with given parameters.
:param addr: The address the state should start at instead of the entry point.
:param args: Any additional positional arguments will be used as arguments to the function call.
The following parameters are optional.
:param base_state: Use this SimState as the base for the new state instead of a blank state.
:param cc: Optionally provide a SimCC object to use a specific calling convention.
:param ret_addr: Use this address as the function's return target.
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
:param toc: The address of the table of contents for ppc64
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: A dictionary of file names with associated preset SimFile objects.
:param concrete_fs: bool describing whether the host filesystem should be consulted when opening files.
:param chroot: A path to use as a fake root directory. Behaves similarly to a real chroot. Used only
when concrete_fs is set to True.
:param kwargs: Any additional keyword args will be passed to the SimState constructor.
:return: The state at the beginning of the function.
:rtype: SimState
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `SimCC.PointerWrapper`. Any value
that can't fit in a register will be automatically put in a
PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the current stack pointer will be used, and it will be updated.
You might not like the results if you provide stack_base but not alloc_base.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses.
"""
return self.project.simos.state_call(addr, *args, **kwargs)
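# Illustrative usage sketch (assumes a loaded `proj = angr.Project(...)` and a
# function at `addr`; PointerWrapper is re-exported on this method, see the
# assignments near the end of this class):
#
#   buf = proj.factory.call_state.PointerWrapper(b"data\x00")
#   state = proj.factory.call_state(addr, buf, 4, ret_addr=0x400000)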
def simulation_manager(self, thing=None, **kwargs):
"""
Constructs a new simulation manager.
:param thing: Optional - What to put in the new SimulationManager's active stash (either a SimState or a list of SimStates).
:param kwargs: Any additional keyword arguments will be passed to the SimulationManager constructor
:returns: The new SimulationManager
:rtype: angr.sim_manager.SimulationManager
Many different types can be passed to this method:
* If nothing is passed in, the SimulationManager is seeded with a state initialized for the program
entry point, i.e. :meth:`entry_state()`.
* If a :class:`SimState` is passed in, the SimulationManager is seeded with that state.
* If a list is passed in, the list must contain only SimStates and the whole list will be used to seed the SimulationManager.
"""
if thing is None:
thing = [ self.entry_state() ]
elif isinstance(thing, (list, tuple)):
if any(not isinstance(val, SimState) for val in thing):
raise AngrError("Bad type to initialize SimulationManager")
elif isinstance(thing, SimState):
thing = [ thing ]
else:
raise AngrError("BadType to initialze SimulationManager: %s" % repr(thing))
return SimulationManager(self.project, active_states=thing, **kwargs)
def simgr(self, *args, **kwargs):
"""
Alias for `simulation_manager` to save our poor fingers
"""
return self.simulation_manager(*args, **kwargs)
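# Illustrative usage sketch (assumes a loaded `proj = angr.Project(...)`):
#
#   simgr = proj.factory.simulation_manager()       # seeded with entry_state()
#   simgr = proj.factory.simgr([state_a, state_b])  # seeded with a list of states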
def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
"""
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as an interface for executing guest code like a
python function.
:rtype: angr.callable.Callable
"""
return Callable(self.project,
addr=addr,
concrete_only=concrete_only,
perform_merge=perform_merge,
base_state=base_state,
toc=toc,
cc=cc)
def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
"""
Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.
:param args: A list of argument storage locations, as SimFunctionArguments.
:param ret_val: The return value storage location, as a SimFunctionArgument.
:param sp_delta: Does this even matter??
:param func_ty: The prototype for the given function, as a SimType or a C-style function declaration that
can be parsed into a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
this `cc` object.
For stack arguments, offsets are relative to the stack pointer on function entry.
"""
return self._default_cc(arch=self.project.arch,
args=args,
ret_val=ret_val,
sp_delta=sp_delta,
func_ty=func_ty)
def cc_from_arg_kinds(self, fp_args, ret_fp=None, sizes=None, sp_delta=None, func_ty=None):
"""
Get a SimCC (calling convention) that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:param func_ty: A SimType for the function itself or a C-style function declaration that can be parsed into
a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
"""
return self._default_cc.from_arg_kinds(arch=self.project.arch,
fp_args=fp_args,
ret_fp=ret_fp,
sizes=sizes,
sp_delta=sp_delta,
func_ty=func_ty)
def block(self, addr, size=None, max_size=None, byte_string=None, vex=None, thumb=False, backup_state=None,
extra_stop_points=None, opt_level=None, num_inst=None, traceflags=0,
insn_bytes=None, insn_text=None, # backward compatibility
strict_block_end=None, collect_data_refs=False,
):
if isinstance(self.project.arch, ArchSoot) and isinstance(addr, SootAddressDescriptor):
return SootBlock(addr, arch=self.project.arch, project=self.project)
if insn_bytes is not None and insn_text is not None:
raise AngrError("You cannot provide both 'insn_bytes' and 'insn_text'!")
if insn_bytes is not None:
byte_string = insn_bytes
if insn_text is not None:
byte_string = self.project.arch.asm(insn_text, addr=addr, as_bytes=True, thumb=thumb)
if byte_string is None:
# assembly failed
raise AngrAssemblyError("Assembling failed. Please make sure keystone is installed, and the assembly"
" string is correct.")
if max_size is not None:
l.warning('Keyword argument "max_size" has been deprecated for block(). Please use "size" instead.')
size = max_size
return Block(addr, project=self.project, size=size, byte_string=byte_string, vex=vex,
extra_stop_points=extra_stop_points, thumb=thumb, backup_state=backup_state,
opt_level=opt_level, num_inst=num_inst, traceflags=traceflags,
strict_block_end=strict_block_end, collect_data_refs=collect_data_refs,
)
def fresh_block(self, addr, size, backup_state=None):
return Block(addr, project=self.project, size=size, backup_state=backup_state)
cc.SimRegArg = SimRegArg
cc.SimStackArg = SimStackArg
_default_cc = None
callable.PointerWrapper = PointerWrapper
call_state.PointerWrapper = PointerWrapper
from .errors import AngrError
from .sim_manager import SimulationManager
from .codenode import HookNode, SyscallNode
from .block import Block, SootBlock
|
|
#! /usr/bin/env python
# encoding: utf-8
import io
import os
import requests.exceptions
from . import exceptions
from tusclient import client
class UploadVideoMixin:
"""Handle uploading a new video to the Vimeo API."""
UPLOAD_ENDPOINT = '/me/videos'
VERSIONS_ENDPOINT = '{video_uri}/versions'
DEFAULT_CHUNK_SIZE = (200 * 1024 * 1024) # 200 MB
def upload(self, filename, **kwargs):
"""Upload a file.
This should be used to upload a local file. If you want a form for your
site to upload direct to Vimeo, you should look at the `POST
/me/videos` endpoint.
https://developer.vimeo.com/api/endpoints/videos#POST/users/{user_id}/videos
Args:
filename (string): Path on disk to file
**kwargs: Supply a `data` dictionary for data to set to your video
when uploading. See the API documentation for parameters you
can send. This is optional.
Returns:
string: The Vimeo Video URI of your uploaded video.
Raises:
UploadAttemptCreationFailure: If we were unable to create an upload
attempt for you.
VideoUploadFailure: If unknown errors occurred when uploading your
video.
"""
filesize = self.__get_file_size(filename)
uri = self.UPLOAD_ENDPOINT
data = kwargs['data'] if 'data' in kwargs else {}
# Is a `chunk_size` specified? Use default value if not.
proposed_or_default_chunk_size = data.get('chunk_size', self.DEFAULT_CHUNK_SIZE)
# For efficiency, let's ensure the pending chunk_size does not result in too many cycles
chunk_size = self.apply_chunk_size_rules(proposed_or_default_chunk_size, filesize)
# Ignore any specified upload approach and size.
if 'upload' not in data:
data['upload'] = {
'approach': 'tus',
'size': filesize
}
else:
data['upload']['approach'] = 'tus'
data['upload']['size'] = filesize
attempt = self.post(uri, data=data, params={'fields': 'uri,upload'})
if attempt.status_code != 200:
raise exceptions.UploadAttemptCreationFailure(
attempt,
"Unable to initiate an upload attempt."
)
attempt = attempt.json()
return self.__perform_tus_upload(filename, attempt, chunk_size=chunk_size)
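# Illustrative usage sketch (assumes a configured vimeo.VimeoClient instance
# named `v`; the `data` fields shown are examples, see the API docs for the
# full parameter set):
#
#   video_uri = v.upload('movie.mp4', data={'name': 'My Movie'})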
def replace(self, video_uri, filename, **kwargs):
"""Replace the source of a single Vimeo video.
https://developer.vimeo.com/api/endpoints/videos#POST/videos/{video_id}/versions
Args:
video_uri (string): Vimeo Video URI
filename (string): Path on disk to file
**kwargs: Supply a `data` dictionary for data to set to your video
when uploading. See the API documentation for parameters you
can send. This is optional.
Returns:
string: The Vimeo Video URI of your replaced video.
"""
filesize = self.__get_file_size(filename)
uri = self.VERSIONS_ENDPOINT.format(video_uri=video_uri)
data = kwargs['data'] if 'data' in kwargs else {}
data['file_name'] = os.path.basename(filename)
# Is a `chunk_size` specified? Use default value if not.
proposed_or_default_chunk_size = data.get('chunk_size', self.DEFAULT_CHUNK_SIZE)
# For efficiency, let's ensure the pending chunk_size does not result in too many cycles
chunk_size = self.apply_chunk_size_rules(proposed_or_default_chunk_size, filesize)
# Ignore any specified upload approach and size.
if 'upload' not in data:
data['upload'] = {
'approach': 'tus',
'size': filesize
}
else:
data['upload']['approach'] = 'tus'
data['upload']['size'] = filesize
attempt = self.post(uri, data=data, params={'fields': 'upload'})
if attempt.status_code != 201:
raise exceptions.UploadAttemptCreationFailure(
attempt,
"Unable to initiate an upload attempt."
)
attempt = attempt.json()
# `uri` doesn't come back from `/videos/:id/versions` so we need to
# manually set it here for uploading.
attempt['uri'] = video_uri
return self.__perform_tus_upload(filename, attempt, chunk_size=chunk_size)
def __perform_tus_upload(self, filename, attempt, chunk_size=DEFAULT_CHUNK_SIZE):
"""Take an upload attempt and perform the actual upload via tus.
https://tus.io/
Args:
filename (string): name of the video file on vimeo.com
attempt (:obj): requests object
chunk_size (int): size of each chunk. defaults to DEFAULT_CHUNK_SIZE
Returns:
string: The Vimeo Video URI of your uploaded video.
Raises:
VideoUploadFailure: If unknown errors occurred when uploading your
video.
"""
upload_link = attempt.get('upload').get('upload_link')
try:
with io.open(filename, 'rb') as fs:
tus_client = client.TusClient('https://files.tus.vimeo.com')
uploader = tus_client.uploader(
chunk_size=chunk_size,
file_stream=fs,
retries=3,
url=upload_link)
uploader.upload()
except Exception as e:
raise exceptions.VideoUploadFailure(
e,
'Unexpected error when uploading through tus.'
)
return attempt.get('uri')
@staticmethod
def apply_chunk_size_rules(proposed_chunk_size, file_size):
"""
Enforces the rule that a user may supply any `proposed_chunk_size`, as long as it results in 1024 or
fewer chunks. If it does not, the chunk size becomes `file_size // 1024 + 1` instead.
Args:
proposed_chunk_size (int): chunk size in bytes
file_size (int): the size of the file to be uploaded, in bytes
Returns:
int:
"""
proposed_chunk_size = 1 if proposed_chunk_size <= 0 else proposed_chunk_size
chunks = file_size // proposed_chunk_size
divides_evenly = file_size % proposed_chunk_size == 0
number_of_chunks_proposed = chunks if divides_evenly else chunks + 1
if number_of_chunks_proposed > 1024:
return (file_size // 1024) + 1
return proposed_chunk_size
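# Worked example (illustrative): with the 200 MB default chunk size, a 5 GiB
# file proposes ceil(5120 MiB / 200 MiB) = 26 chunks -- under the 1024 cap,
# so the proposed size is kept. A 1 MiB chunk size on the same file would
# propose 5120 chunks, so file_size // 1024 + 1 (~5 MiB) is returned instead.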
def __get_file_size(self, filename):
"""Get the size of a specific file.
Args:
filename (string): Path on disk to file
Returns:
integer: The size of the file.
"""
try:
return os.path.getsize(filename)
except TypeError:
return len(filename.read())
class UploadPictureMixin:
"""
Class for uploading a picture to Vimeo.
Functionality for uploading a picture to Vimeo for another object
(video, user, etc).
"""
BASE_FIELDS = {'link', 'uri'}
def upload_picture(self, obj, filename, activate=False, fields=None):
"""
Upload a picture for the object.
The object (obj) can be the URI for the object or the response/parsed
json for it.
"""
if isinstance(obj, str):
obj = self.get(
obj, params={'fields': 'metadata.connections.pictures.uri'})
if obj.status_code != 200:
raise exceptions.ObjectLoadFailure(
"Failed to load the target object")
obj = obj.json()
if isinstance(fields, str):
fields = {field.strip() for field in fields.split(',')}
fields = self.BASE_FIELDS.union(fields) if fields else self.BASE_FIELDS
# Get the picture object.
picture = self.post(
obj['metadata']['connections']['pictures']['uri'],
params={'fields': ','.join(fields)}
)
if picture.status_code != 201:
raise exceptions.PictureCreationFailure(
picture, "Failed to create a new picture with Vimeo.")
picture = picture.json()
with io.open(filename, 'rb') as f:
upload_resp = self.put(
picture['link'],
data=f,
params={'fields': 'error'})
if upload_resp.status_code != 200:
raise exceptions.PictureUploadFailure(
upload_resp, "Failed uploading picture")
if activate:
active = self.patch(
picture['uri'],
data={"active": "true"},
params={'fields': 'error'})
if active.status_code != 200:
raise exceptions.PictureActivationFailure(
active, "Failed activating picture")
picture['active'] = True
return picture
class UploadTexttrackMixin:
"""Functionality for uploading a texttrack to Vimeo for a video."""
TEXTTRACK_ENDPOINT = '{video_uri}/texttracks'
BASE_FIELDS = {'link'}
def upload_texttrack(self, video_uri, track_type, language, filename,
fields=None):
"""Upload the texttrack at the given uri with the named source file."""
uri = self.TEXTTRACK_ENDPOINT.format(video_uri=video_uri)
name = filename.split('/')[-1]
if isinstance(fields, str):
fields = {field.strip() for field in fields.split(',')}
fields = self.BASE_FIELDS.union(fields) if fields else self.BASE_FIELDS
texttrack = self.post(uri,
data={'type': track_type,
'language': language,
'name': name},
params={'fields': ','.join(fields)})
if texttrack.status_code != 201:
raise exceptions.TexttrackCreationFailure(
texttrack, "Failed to create a new texttrack with Vimeo")
texttrack = texttrack.json()
with io.open(filename, 'rb') as f:
upload_resp = self.put(texttrack['link'], data=f)
if upload_resp.status_code != 200:
raise exceptions.TexttrackUploadFailure(
upload_resp, "Failed uploading texttrack")
return texttrack
class UploadMixin(UploadVideoMixin, UploadPictureMixin, UploadTexttrackMixin):
"""Handle uploading to the Vimeo API."""
pass
|
|
import random
import point
import time
import stats
import plot
import math
import uav
import rospy
import tf
import violations
import controller
import zmqros
from geometry_msgs.msg import Twist
class Simulation(object):
POSITION_NOISE = 0.1
ORIENTATION_NOISE = 0.1
def __init__(self, problem, risk_grid, **kwargs):
rospy.init_node("rover", anonymous=True)
problem.grid.set_risk_grid(risk_grid)
self.control_noise = kwargs.get("control_noise", 0.1)
self.init_problem_instance(problem, risk_grid, kwargs)
self.init_visualizations(problem, risk_grid, kwargs)
self.init_zmqros(problem)
self.init_configurations(problem, risk_grid, kwargs)
self.init_statistics(problem, risk_grid, kwargs)
self.msg_type = "geometry_msgs/Twist"
self.max_z_vel = 50 # cm/s
self.max_o_vel = 30 # deg/s
def init_zmqros(self, problem):
if self.practical:
self.swarm = zmqros.coordinator.create_swarm_from_ns(
zmqros.get_ns_host(), zmqros.get_ns_port()
)
else:
self.ns = None
def init_problem_instance(self, problem, risk_grid, kwargs):
# problem instance setup
self.problem = problem
self.risk_grid = risk_grid
self.planner_obj = kwargs.get("algorithm")
self.prev_waypoints = dict()
self.practical = kwargs.get("practical", False)
self.names = kwargs.get("names", dict())
def init_configurations(self, problem, risk_grid, kwargs):
# ROS stuff
self.listener = tf.TransformListener()
self.pubs = self.init_pubs(self.names)
self.quad_list = self.init_quads()
self.controllers = self.init_controllers()
self.pl = self.planner_obj(problem, risk_grid, self.quad_list)
def init_visualizations(self, problem, risk_grid, kwargs):
# visualization variables
self.drawer = kwargs.get("drawer", None)
self.show_time_grid = kwargs.get("show_time_grid", True)
if self.drawer is not None:
self.drawer.draw_risk_grid(self.risk_grid)
def init_statistics(self, problem, risk_grid, kwargs):
# statistics gathering classes
self.mca = stats.MonteCarloArea(problem, 1000)
self.sqa = stats.SensorQualityAverage(self.pl)
self.ra = stats.RiskAverage(self.pl)
self.atd = stats.AverageTimeDifference(self.problem.grid)
self.init_stats(**kwargs)
def init_pubs(self, names):
pub_dict = dict()
for name, topic_name in names.iteritems():
pub_dict[name] = rospy.Publisher(
topic_name, Twist, queue_size=10
)
return pub_dict
def init_stats(self, **kwargs):
out_file = kwargs.get("out_file", None)
position_file = kwargs.get("position_file", None)
verification_file = kwargs.get("verification_file", None)
if out_file is not None:
self.out_file = open(out_file, "w")
else:
self.out_file = None
if position_file is not None:
self.position_file = open(position_file, "w")
else:
self.position_file = None
if verification_file is not None:
self.verification_file = open(verification_file, "w")
else:
self.verification_file = None
if self.show_time_grid:
self.t_plotter = plot.TimeGridPlotter(self.problem.grid)
def init_quads_practical(self):
quad_list = list()
for name, _ in self.pubs.iteritems():
quad = uav.UAV(self.problem, name)
try:
x, y, z, b = self.get_configuration(quad)
except tf.Exception:
raise RuntimeError("Could not initialize quad positions")
exit()
quad.set_position(x, y, z)
quad.set_orientation(math.degrees(b))
quad_list.append(quad)
return quad_list
def init_quads_simulation(self):
quad_list = list()
init_length = math.ceil(math.sqrt(self.problem.num_quads))
quad_spacing = 20
for i in xrange(self.problem.num_quads):
down = int(i // init_length)
            across = int(i % init_length)
            s_x, s_y = (
                100 + quad_spacing * self.problem.quad_size * across,
                100 + quad_spacing * self.problem.quad_size * down
            )
quad = uav.UAV(self.problem)
quad.set_position(s_x, s_y, 10)
quad.set_orientation(0)
quad.set_camera_angle(self.problem.initial_camera_angle)
quad_list.append(quad)
return quad_list
def init_controllers(self):
Kp, Ki, Kd = 1, 0, 0.5
controllers = dict()
for quad in self.quad_list:
clr = controller.PID(quad, Kp, Ki, Kd)
clr.set_noise_std(self.control_noise)
controllers[quad] = clr
return controllers
def init_quads(self):
if self.practical:
quad_list = self.init_quads_practical()
else:
quad_list = self.init_quads_simulation()
for quad in quad_list:
self.problem.grid.update_grid(quad, 1)
self.prev_waypoints[quad] = (quad.x, quad.y, quad.z, quad.beta)
return quad_list
def saturate(self, val, max_val):
if val > max_val:
return max_val
else:
return val
def publish_practical_configuration(self, quad, heading, beta, phi):
vel = Twist()
vel.angular.x = 0
vel.angular.y = 0
vel.angular.z = math.radians(
self.saturate(beta - quad.beta, self.max_o_vel)
)
vel.linear.x = heading.x * quad.speed
vel.linear.y = heading.y * quad.speed
vel.linear.z = self.saturate(50 * heading.z / 5.0, self.max_z_vel)
pub_vel = self.convert_coordinates_vicon(vel)
self.swarm[quad.get_name()].send_message(
self.msg_type,
self.names[quad.get_name()],
pub_vel
)
expected = [0, 0, 0, 0]
expected[0] = quad.x + vel.linear.x * quad.speed
expected[1] = quad.y + vel.linear.y * quad.speed
expected[2] = quad.z + vel.linear.z
expected[3] = beta
return tuple(expected)
def publish_simulated_configuration(self, quad, heading, beta, phi):
waypoint = point.Point(
quad.x + heading.get_x() * quad.speed,
quad.y + heading.get_y() * quad.speed,
quad.z + heading.get_z()
)
self.controllers[quad].publish_waypoint(waypoint)
        # Uncomment the next line to bypass the PID dynamics and move the
        # quad straight to the waypoint:
# quad.set_position(waypoint.x, waypoint.y, waypoint.z)
quad.set_orientation(beta)
quad.set_camera_angle(phi)
return waypoint.x, waypoint.y, waypoint.z, quad.beta
def publish_configuration(self, quad, heading, beta, phi, iteration):
if self.practical:
expected = self.publish_practical_configuration(
quad, heading, beta, phi
)
else:
expected = self.publish_simulated_configuration(
quad, heading, beta, phi
)
self.problem.grid.update_grid(quad, iteration + 2)
return expected
def publish_hover(self, quad, iteration):
expected = self.publish_configuration(
quad, point.Point(0, 0, 0), quad.beta, quad.phi, iteration
)
return expected
def publish_towards_center(self, quad, vio, iteration):
vc = point.Point(0, 0, 0)
if vio == violations.X_OUT or vio == violations.Y_OUT:
vc.x = self.problem.width / 2 - quad.x
vc.y = self.problem.height / 2 - quad.y
uvc = vc.to_unit_vector()
        elif vio == violations.Z_OUT:
            vc.z = self.problem.sq_height - quad.z
            uvc = vc
        else:
            # Fall back to hovering in place so uvc is always bound.
            uvc = vc
expected = self.publish_configuration(
quad, uvc, quad.beta, quad.phi, iteration
)
return expected
def get_configuration(self, quad):
if self.practical:
try:
tf_world = "/world"
quad_id = str(self.swarm.get_id_by_name(quad.get_name()))
our_obj = "/" + quad_id + "/base_link"
except KeyError:
raise AttributeError(
"{} does not exist in this world"
.format(quad.get_name())
)
self.listener.waitForTransform(
tf_world, our_obj,
rospy.Time(0), rospy.Duration(0.1)
)
trans, rot = self.listener.lookupTransform(
tf_world, our_obj, rospy.Time(0)
)
actual_conf = self.convert_coordinates_rviz(trans, rot)
return actual_conf
else:
x = quad.x + random.gauss(0, self.POSITION_NOISE)
y = quad.y + random.gauss(0, self.POSITION_NOISE)
z = quad.z + random.gauss(0, self.POSITION_NOISE)
beta = quad.beta + random.gauss(0, self.ORIENTATION_NOISE)
return x, y, z, beta
def convert_coordinates_vicon(self, waypoint):
pos = waypoint.linear
pos.x = pos.x / 100.0
pos.y = pos.y / 100.0
pos.z = pos.z / 100.0
waypoint.linear = pos
return waypoint
def convert_coordinates_rviz(self, trans, rot):
r_x = trans[0] * 100 + self.problem.width / 2
r_y = trans[1] * 100 + self.problem.height / 2
r_z = trans[2] * 100
_, _, beta = tf.transformations.euler_from_quaternion(rot)
return r_x, r_y, r_z, math.degrees(beta)
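    # Note on units (inferred from convert_coordinates_vicon above): tf/Vicon
    # report positions in meters relative to the arena center, while the grid
    # works in centimeters with its origin at a corner, hence the *100
    # scaling and the width/2, height/2 offsets.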
def is_safe(self, quad):
if quad.x > self.problem.width or quad.x < 0:
return False, violations.X_OUT, None
if quad.y > self.problem.height or quad.y < 0:
return False, violations.Y_OUT, None
too_high = quad.z > self.problem.max_height
too_low = quad.z < self.problem.min_height
if too_high or too_low:
return False, violations.Z_OUT, None
for q in self.quad_list:
distance = q.get_point_2d().dist_to(quad.get_point_2d())
too_close = distance < self.problem.min_safe_distance
is_self = q == quad
if too_close and not is_self:
return False, violations.TOO_CLOSE, q
return True, violations.NONE, None
def run(self):
# f = open('sandbox/multi_verify_2.txt')
for i in xrange(self.problem.num_steps):
self.problem.grid.set_start()
for quad in self.quad_list:
try:
rpx, rpy, rpz, rb = self.get_configuration(quad)
except tf.Exception as e:
rpx, rpy, rpz, rb = quad.x, quad.y, quad.z, quad.beta
if self.practical:
print "Error"
self.swarm[quad.get_name()].send_message(
"std_msgs/Empty", "/land", None
)
print e
finally:
quad.set_position(rpx, rpy, rpz)
quad.set_orientation(rb)
self.execute_control(quad, i)
self.write_verification_results(
(rpx, rpy, rpz, rb),
self.prev_waypoints[quad], i
)
# data = f.readline()
# data = map(float, data.split(" "))
# quad.x = data[5]
# quad.y = data[6]
# quad.z = data[7]
# quad.beta = data[8]
# self.problem.grid.update_grid(quad, i + 2)
self.visualize()
self.update_stats(i)
self.write_stats_results(i + 2)
def execute_control(self, quad, i):
if self.practical:
self.problem.grid.update_grid(quad, i + 2)
conf_allowed, vio, extra = self.is_safe(quad)
if conf_allowed:
heading, beta, phi = self.pl.get_next_configuration(quad, i)
expected = self.publish_configuration(
quad, heading, beta, phi, i
)
else:
if vio == violations.TOO_CLOSE:
heading = (quad.get_position() - extra)\
.to_unit_vector()
expected = self.publish_configuration(
quad, heading, quad.beta, quad.phi, i
)
else:
expected = self.publish_towards_center(
quad, vio, i
)
self.prev_waypoints[quad] = expected
def visualize(self):
if self.show_time_grid:
self.t_plotter.update()
        if self.drawer is not None:
self.drawer.clear_all()
for quad in self.quad_list:
self.drawer.draw_quad(quad)
self.drawer.update()
def update_stats(self, iteration):
self.mca.update_average_efficiency(self.quad_list)
self.sqa.update_average_sq(self.quad_list)
self.ra.update_average_risk(self.quad_list)
self.atd.update_average_time_difference(iteration + 2)
def write_stats_results(self, ct):
        if self.out_file is not None:
self.out_file.write("{} {} {} {} {} {} {} {} {} {}\n".format(
self.mca.get_moving_average_efficiency(),
self.sqa.get_moving_average(),
self.ra.get_moving_average(),
self.atd.get_average(),
self.problem.grid.get_cumulative_coverage(),
self.problem.grid.get_inter_cum_coverage(),
self.problem.grid.get_average_iter_between(),
self.problem.grid.get_average_time_between(),
self.problem.grid.get_performance(ct),
time.time()
))
def write_verification_results(self, expected_pos, out_pos, iteration):
        if self.verification_file is not None:
self.verification_file.write("{} {} {} {} {} {} {} {} {}\n".format(
iteration,
expected_pos[0],
expected_pos[1],
expected_pos[2],
expected_pos[3],
out_pos[0],
out_pos[1],
out_pos[2],
out_pos[3]
))
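
# A minimal construction sketch (illustrative only: `problem`, `risk_grid`
# and the planner class come from the surrounding project, and __init__
# calls rospy.init_node, so a ROS master must be running):
#
#     sim = Simulation(problem, risk_grid,
#                      algorithm=MyPlanner,  # instantiated as
#                                            # MyPlanner(problem, risk_grid, quads)
#                      practical=False,      # simulate instead of flying
#                      show_time_grid=False,
#                      out_file="stats.txt")
#     sim.run()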
|
|
"""
NetworkX to d3.js Force Layout
==============================
MPLD3 Plugin to convert a NetworkX graph to a force layout.
This is an example demoed `here
<http://blog.kdheepak.com/mpld3-networkx-d3js-force-layout.html>`_
You can download the plugin from the Github repo `here
<https://github.com/kdheepak/mpld3_plugins/blob/master/mpld3_plugins/plugins/networkxd3forcelayout.py>`_
BSD 3-Clause License
Copyright (C) 2016 Dheepak Krishnamurthy
"""
import mpld3
graph = {'directed': False,
'graph': {'name': "Zachary's Karate Club"},
'links': [{'source': 0, 'target': 1},
{'source': 0, 'target': 2},
{'source': 0, 'target': 3},
{'source': 0, 'target': 4},
{'source': 0, 'target': 5},
{'source': 0, 'target': 6},
{'source': 0, 'target': 7},
{'source': 0, 'target': 8},
{'source': 0, 'target': 10},
{'source': 0, 'target': 11},
{'source': 0, 'target': 12},
{'source': 0, 'target': 13},
{'source': 0, 'target': 17},
{'source': 0, 'target': 19},
{'source': 0, 'target': 21},
{'source': 0, 'target': 31},
{'source': 1, 'target': 2},
{'source': 1, 'target': 3},
{'source': 1, 'target': 7},
{'source': 1, 'target': 13},
{'source': 1, 'target': 17},
{'source': 1, 'target': 19},
{'source': 1, 'target': 21},
{'source': 1, 'target': 30},
{'source': 2, 'target': 3},
{'source': 2, 'target': 32},
{'source': 2, 'target': 7},
{'source': 2, 'target': 8},
{'source': 2, 'target': 9},
{'source': 2, 'target': 13},
{'source': 2, 'target': 27},
{'source': 2, 'target': 28},
{'source': 3, 'target': 7},
{'source': 3, 'target': 12},
{'source': 3, 'target': 13},
{'source': 4, 'target': 10},
{'source': 4, 'target': 6},
{'source': 5, 'target': 16},
{'source': 5, 'target': 10},
{'source': 5, 'target': 6},
{'source': 6, 'target': 16},
{'source': 8, 'target': 32},
{'source': 8, 'target': 30},
{'source': 8, 'target': 33},
{'source': 9, 'target': 33},
{'source': 13, 'target': 33},
{'source': 14, 'target': 32},
{'source': 14, 'target': 33},
{'source': 15, 'target': 32},
{'source': 15, 'target': 33},
{'source': 18, 'target': 32},
{'source': 18, 'target': 33},
{'source': 19, 'target': 33},
{'source': 20, 'target': 32},
{'source': 20, 'target': 33},
{'source': 22, 'target': 32},
{'source': 22, 'target': 33},
{'source': 23, 'target': 32},
{'source': 23, 'target': 25},
{'source': 23, 'target': 27},
{'source': 23, 'target': 29},
{'source': 23, 'target': 33},
{'source': 24, 'target': 25},
{'source': 24, 'target': 27},
{'source': 24, 'target': 31},
{'source': 25, 'target': 31},
{'source': 26, 'target': 33},
{'source': 26, 'target': 29},
{'source': 27, 'target': 33},
{'source': 28, 'target': 33},
{'source': 28, 'target': 31},
{'source': 29, 'target': 32},
{'source': 29, 'target': 33},
{'source': 30, 'target': 33},
{'source': 30, 'target': 32},
{'source': 31, 'target': 33},
{'source': 31, 'target': 32},
{'source': 32, 'target': 33}],
'multigraph': False,
'nodes': [{'club': 'Mr. Hi', 'color': 'purple', 'id': 0, 'size': 16},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 1, 'size': 9},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 2, 'size': 10},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 3, 'size': 6},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 4, 'size': 3},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 5, 'size': 4},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 6, 'size': 4},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 7, 'size': 4},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 8, 'size': 5},
{'club': 'Officer', 'color': 'orange', 'id': 9, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 10, 'size': 3},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 11, 'size': 1},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 12, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 13, 'size': 5},
{'club': 'Officer', 'color': 'orange', 'id': 14, 'size': 2},
{'club': 'Officer', 'color': 'orange', 'id': 15, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 16, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 17, 'size': 2},
{'club': 'Officer', 'color': 'orange', 'id': 18, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 19, 'size': 3},
{'club': 'Officer', 'color': 'orange', 'id': 20, 'size': 2},
{'club': 'Mr. Hi', 'color': 'purple', 'id': 21, 'size': 2},
{'club': 'Officer', 'color': 'orange', 'id': 22, 'size': 2},
{'club': 'Officer', 'color': 'orange', 'id': 23, 'size': 5},
{'club': 'Officer', 'color': 'orange', 'id': 24, 'size': 3},
{'club': 'Officer', 'color': 'orange', 'id': 25, 'size': 3},
{'club': 'Officer', 'color': 'orange', 'id': 26, 'size': 2},
{'club': 'Officer', 'color': 'orange', 'id': 27, 'size': 4},
{'club': 'Officer', 'color': 'orange', 'id': 28, 'size': 3},
{'club': 'Officer', 'color': 'orange', 'id': 29, 'size': 4},
{'club': 'Officer', 'color': 'orange', 'id': 30, 'size': 4},
{'club': 'Officer', 'color': 'orange', 'id': 31, 'size': 6},
{'club': 'Officer', 'color': 'orange', 'id': 32, 'size': 12},
{'club': 'Officer', 'color': 'orange', 'id': 33, 'size': 17}]}
class NetworkXD3ForceLayout(mpld3.plugins.PluginBase):
"""A NetworkX to D3 Force Layout Plugin"""
JAVASCRIPT = """
mpld3.register_plugin("networkxd3forcelayout", NetworkXD3ForceLayoutPlugin);
NetworkXD3ForceLayoutPlugin.prototype = Object.create(mpld3.Plugin.prototype);
NetworkXD3ForceLayoutPlugin.prototype.constructor = NetworkXD3ForceLayoutPlugin;
NetworkXD3ForceLayoutPlugin.prototype.requiredProps = ["graph",
"ax_id",];
NetworkXD3ForceLayoutPlugin.prototype.defaultProps = { coordinates: "data",
gravity: 1,
charge: -30,
link_strength: 1,
friction: 0.9,
link_distance: 20,
maximum_stroke_width: 2,
minimum_stroke_width: 1,
nominal_stroke_width: 1,
maximum_radius: 10,
minimum_radius: 1,
nominal_radius: 5,
};
function NetworkXD3ForceLayoutPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
var color = d3.scale.category20();
NetworkXD3ForceLayoutPlugin.prototype.zoomScaleProp = function (nominal_prop, minimum_prop, maximum_prop) {
var zoom = this.ax.zoom;
        var scalerFunction = function() {
var prop = nominal_prop;
if (nominal_prop*zoom.scale()>maximum_prop) prop = maximum_prop/zoom.scale();
if (nominal_prop*zoom.scale()<minimum_prop) prop = minimum_prop/zoom.scale();
return prop
}
return scalerFunction;
}
NetworkXD3ForceLayoutPlugin.prototype.setupDefaults = function () {
this.zoomScaleStroke = this.zoomScaleProp(this.props.nominal_stroke_width,
this.props.minimum_stroke_width,
this.props.maximum_stroke_width)
this.zoomScaleRadius = this.zoomScaleProp(this.props.nominal_radius,
this.props.minimum_radius,
this.props.maximum_radius)
}
NetworkXD3ForceLayoutPlugin.prototype.zoomed = function() {
this.tick()
}
NetworkXD3ForceLayoutPlugin.prototype.draw = function(){
plugin = this
brush = this.fig.getBrush();
DEFAULT_NODE_SIZE = this.props.nominal_radius;
var height = this.fig.height
var width = this.fig.width
var graph = this.props.graph
var gravity = this.props.gravity.toFixed()
var charge = this.props.charge.toFixed()
var link_distance = this.props.link_distance.toFixed()
var link_strength = this.props.link_strength.toFixed()
var friction = this.props.friction.toFixed()
this.ax = mpld3.get_element(this.props.ax_id, this.fig)
var ax = this.ax;
this.ax.elements.push(this)
ax_obj = this.ax;
var width = d3.max(ax.x.range()) - d3.min(ax.x.range()),
height = d3.max(ax.y.range()) - d3.min(ax.y.range());
var color = d3.scale.category20();
this.xScale = d3.scale.linear().domain([0, 1]).range([0, width]) // ax.x;
this.yScale = d3.scale.linear().domain([0, 1]).range([height, 0]) // ax.y;
this.force = d3.layout.force()
.size([width, height]);
this.svg = this.ax.axes.append("g");
for(var i = 0; i < graph.nodes.length; i++){
var node = graph.nodes[i];
if (node.hasOwnProperty('x')) {
node.x = this.ax.x(node.x);
}
if (node.hasOwnProperty('y')) {
node.y = this.ax.y(node.y);
}
}
this.force
.nodes(graph.nodes)
.links(graph.links)
.linkStrength(link_strength)
.friction(friction)
.linkDistance(link_distance)
.charge(charge)
.gravity(gravity)
.start();
this.link = this.svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.attr("stroke", "black")
.style("stroke-width", function (d) { return Math.sqrt(d.value); });
this.node = this.svg.selectAll(".node")
.data(graph.nodes)
.enter().append("circle")
.attr("class", "node")
.attr("r", function(d) {return d.size === undefined ? DEFAULT_NODE_SIZE : d.size ;})
.style("fill", function (d) { return d.color; });
this.node.append("title")
.text(function (d) { return d.name; });
this.force.on("tick", this.tick.bind(this));
this.setupDefaults()
};
NetworkXD3ForceLayoutPlugin.prototype.tick = function() {
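        // The force layout runs in pixel space; invert each position through
        // the linear x/y scales and re-apply the mpld3 axes transforms so
        // nodes and links stay registered with the axes under pan/zoom.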
this.link.attr("x1", function (d) { return this.ax.x(this.xScale.invert(d.source.x)); }.bind(this))
.attr("y1", function (d) { return this.ax.y(this.yScale.invert(d.source.y)); }.bind(this))
.attr("x2", function (d) { return this.ax.x(this.xScale.invert(d.target.x)); }.bind(this))
.attr("y2", function (d) { return this.ax.y(this.yScale.invert(d.target.y)); }.bind(this));
this.node.attr("transform", function (d) {
return "translate(" + this.ax.x(this.xScale.invert(d.x)) + "," + this.ax.y(this.yScale.invert(d.y)) + ")";
}.bind(this)
);
}
"""
def __init__(self, graph, ax,
gravity=1,
link_distance=20,
charge=-30,
node_size=5,
link_strength=1,
friction=0.9):
self.dict_ = {"type": "networkxd3forcelayout",
"graph": graph,
"ax_id": mpld3.utils.get_id(ax),
"gravity": gravity,
"charge": charge,
"friction": friction,
"link_distance": link_distance,
"link_strength": link_strength,
"nominal_radius": node_size}
import matplotlib.pyplot as plt
fig, axs = plt.subplots(1, 1, figsize=(10, 10))
ax = axs
mpld3.plugins.connect(fig, NetworkXD3ForceLayout(graph,
ax,
gravity=.5,
link_distance=20,
charge=-600,
friction=1
)
)
mpld3.show()
|
|
#!/usr/bin/env python
import hashlib
import math
import unittest
from binascii import hexlify, unhexlify
from collections import namedtuple
import ledger.merkle_verifier
import ledger.tree_hasher
from ledger import compact_merkle_tree
from ledger import error
class TreeHasherTest(unittest.TestCase):
sha256_empty_hash = \
b"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
sha256_leaves = [
(b"",
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"),
(b"101112131415161718191a1b1c1d1e1f",
b"3bfb960453ebaebf33727da7a1f4db38acc051d381b6da20d6d4e88f0eabfd7a")
]
sha256_nodes = [
(b"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
b"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f",
b"1a378704c17da31e2d05b6d121c2bb2c7d76f6ee6fa8f983e596c2d034963c57")]
# array of bytestrings of the following literals in hex
test_vector_leaves = ["".join(chr(int(n, 16)) for n in s.split()).encode()
for s in [
"",
"00",
"10",
"20 21",
"30 31",
"40 41 42 43",
"50 51 52 53 54 55 56 57",
"60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f"]]
test_vector_hashes = [
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
b"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
b"aeb6bcfe274b70a14fb067a5e5578264db0fa9b51af5e0ba159158f329e06e77",
b"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7",
b"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
b"ddb89be403809e325750d3d263cd78929c2942b7942a34b77e122c9594a74c8c",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
]
def test_empty_hash(self):
hasher = ledger.tree_hasher.TreeHasher()
self.assertEqual(hexlify(hasher.hash_empty()),
TreeHasherTest.sha256_empty_hash)
def test_hash_leaves(self):
hasher = ledger.tree_hasher.TreeHasher()
for leaf, val in TreeHasherTest.sha256_leaves:
self.assertEqual(hasher.hash_leaf(unhexlify(leaf)),
unhexlify(val))
def test_hash_children(self):
hasher = ledger.tree_hasher.TreeHasher()
for left, right, val in TreeHasherTest.sha256_nodes:
x = hasher.hash_children(unhexlify(left),
unhexlify(right))
self.assertEqual(hexlify(x), val)
def test_hash_full_invalid_index(self):
hasher = ledger.tree_hasher.TreeHasher()
self.assertRaises(IndexError, hasher._hash_full, "abcd", -5, -1)
self.assertRaises(IndexError, hasher._hash_full, "abcd", -1, 1)
self.assertRaises(IndexError, hasher._hash_full, "abcd", 1, 5)
self.assertRaises(IndexError, hasher._hash_full, "abcd", 2, 1)
def test_hash_full_empty(self):
hasher = ledger.tree_hasher.TreeHasher()
for i in range(0, 5):
self.assertEqual(hexlify(hasher._hash_full("abcd", i, i)[0]),
TreeHasherTest.sha256_empty_hash)
def test_hash_full_tree(self):
hasher = ledger.tree_hasher.TreeHasher()
self.assertEqual(hasher.hash_full_tree([]), hasher.hash_empty())
leaves = [c.encode() for c in "abcde"]
a, b, c, d, e = [hasher.hash_leaf(c) for c in leaves]
h = hasher.hash_children
root_hash = h(h(h(a, b),
h(c, d)),
e)
self.assertEqual(hasher.hash_full_tree(leaves), root_hash)
def test_hash_full_tree_test_vector(self):
hasher = ledger.tree_hasher.TreeHasher()
for i in range(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = unhexlify(TreeHasherTest.test_vector_hashes[i])
self.assertEqual(hasher.hash_full_tree(test_vector), expected_hash)
class HexTreeHasher(ledger.tree_hasher.TreeHasher):
def __init__(self, hashfunc=hashlib.sha256):
self.hasher = ledger.tree_hasher.TreeHasher(hashfunc)
def hash_empty(self):
return hexlify(self.hasher.hash_empty())
def hash_leaf(self, data):
return hexlify(self.hasher.hash_leaf(unhexlify(data)))
def hash_children(self, left, right):
return hexlify(self.hasher.hash_children(unhexlify(left),
unhexlify(right)))
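
# HexTreeHasher is a thin adapter kept for test readability: it hex-decodes
# its inputs, delegates to the binary TreeHasher, and hex-encodes the result.
# Sketch (matching the first entry of sha256_leaves above):
#
#     HexTreeHasher().hash_leaf(b"") == \
#         b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"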
class CompactMerkleTreeTest(unittest.TestCase):
def setUp(self):
self.tree = compact_merkle_tree.CompactMerkleTree(HexTreeHasher())
def test_extend_from_empty(self):
for i in range(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = TreeHasherTest.test_vector_hashes[i]
self.tree = compact_merkle_tree.CompactMerkleTree()
self.tree.extend(test_vector)
root = hexlify(self.tree.root_hash)
self.assertEqual(root, expected_hash)
def test_push_subtree_1(self):
for i in range(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
self.tree = compact_merkle_tree.CompactMerkleTree()
self.tree.extend(test_vector)
self.tree._push_subtree([b"test leaf"])
self.assertEqual(len(self.tree), len(test_vector) + 1)
def test_extend_from_partial(self):
z = len(TreeHasherTest.test_vector_leaves)
for i in range(z):
self.tree = compact_merkle_tree.CompactMerkleTree()
# add up to i
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = TreeHasherTest.test_vector_hashes[i]
self.tree.extend(test_vector)
self.assertEqual(self.tree.root_hash_hex, expected_hash)
# add up to z
test_vector = TreeHasherTest.test_vector_leaves[i+1:]
expected_hash = TreeHasherTest.test_vector_hashes[z-1]
self.tree.extend(test_vector)
self.assertEqual(self.tree.root_hash_hex, expected_hash)
class MerkleVerifierTest(unittest.TestCase):
# (old_tree_size, new_tree_size, old_root, new_root, proof)
# Test vectors lifted from the C++ branch.
sha256_proofs = [
(1, 1,
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[]),
(1, 8,
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
[b"96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7",
b"5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
b"6b47aaf29ee3c2af9af889bc1fb9254dabd31177f16232dd6aab035ca39bf6e4"]),
(6, 8,
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
[b"0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
b"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
b"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]),
(2, 5,
b"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
b"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
[b"5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
b"bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"])
]
# Data for leaf inclusion proof test
sha256_audit_path = [
b"1a208aeebcd1b39fe2de247ee8db9454e1e93a312d206b87f6ca9cc6ec6f1ddd",
b"0a1b78b383f580856f433c01a5741e160d451c185910027f6cc9f828687a40c4",
b"3d1745789bc63f2da15850de1c12a5bf46ed81e1cc90f086148b1662e79aab3d",
b"9095b61e14d8990acf390905621e62b1714fb8e399fbb71de5510e0aef45affe",
b"0a332b91b8fab564e6afd1dd452449e04619b18accc0ff9aa8393cd4928451f2",
b"2336f0181d264aed6d8f3a6507ca14a8d3b3c3a23791ac263e845d208c1ee330",
b"b4ce56e300590500360c146c6452edbede25d4ed83919278749ee5dbe178e048",
b"933f6ddc848ea562e4f9c5cfb5f176941301dad0c6fdb9d1fbbe34fac1be6966",
b"b95a6222958a86f74c030be27c44f57dbe313e5e7c7f4ffb98bcbd3a03bb52f2",
b"daeeb3ce5923defd0faeb8e0c210b753b85b809445d7d3d3cd537a9aabaa9c45",
b"7fadd0a13e9138a2aa6c3fdec4e2275af233b94812784f66bcca9aa8e989f2bc",
b"1864e6ba3e32878610546539734fb5eeae2529991f130c575c73a7e25a2a7c56",
b"12842d1202b1dc6828a17ab253c02e7ce9409b5192430feba44189f39cc02d66",
b"29af64b16fa3053c13d02ac63aa75b23aa468506e44c3a2315edc85d2dc22b11",
b"b527b99934a0bd9edd154e449b0502e2c499bba783f3bc3dfe23364b6b532009",
b"4584db8ae8e351ace08e01f306378a92bfd43611714814f3d834a2842d69faa8",
b"86a9a41573b0d6e4292f01e93243d6cc65b30f06606fc6fa57390e7e90ed580f",
b"a88b98fbe84d4c6aae8db9d1605dfac059d9f03fe0fcb0d5dff1295dacba09e6",
b"06326dc617a6d1f7021dc536026dbfd5fffc6f7c5531d48ef6ccd1ed1569f2a1",
b"f41fe8fdc3a2e4e8345e30216e7ebecffee26ff266eeced208a6c2a3cf08f960",
b"40cf5bde8abb76983f3e98ba97aa36240402975674e120f234b3448911090f8d",
b"b3222dc8658538079883d980d7fdc2bef9285344ea34338968f736b04aeb387a"]
raw_hex_leaf = (
b"00000000013de9d2b29b000000055b308205573082043fa00302010202072b777b56df"
b"7bc5300d06092a864886f70d01010505003081ca310b30090603550406130255533110"
b"300e060355040813074172697a6f6e61311330110603550407130a53636f7474736461"
b"6c65311a3018060355040a1311476f44616464792e636f6d2c20496e632e3133303106"
b"0355040b132a687474703a2f2f6365727469666963617465732e676f64616464792e63"
b"6f6d2f7265706f7369746f72793130302e06035504031327476f204461646479205365"
b"637572652043657274696669636174696f6e20417574686f726974793111300f060355"
b"040513083037393639323837301e170d3133303131343038353035305a170d31353031"
b"31343038353035305a305331163014060355040a130d7777772e69646e65742e6e6574"
b"3121301f060355040b1318446f6d61696e20436f6e74726f6c2056616c696461746564"
b"311630140603550403130d7777772e69646e65742e6e657430820122300d06092a8648"
b"86f70d01010105000382010f003082010a0282010100d4e4a4b1bbc981c9b8166f0737"
b"c113000aa5370b21ad86a831a379de929db258f056ba0681c50211552b249a02ec00c5"
b"37e014805a5b5f4d09c84fdcdfc49310f4a9f9004245d119ce5461bc5c42fd99694b88"
b"388e035e333ac77a24762d2a97ea15622459cc4adcd37474a11c7cff6239f810120f85"
b"e014d2066a3592be604b310055e84a74c91c6f401cb7f78bdb45636fb0b1516b04c5ee"
b"7b3fa1507865ff885d2ace21cbb28fdaa464efaa1d5faab1c65e4c46d2139175448f54"
b"b5da5aea956719de836ac69cd3a74ca049557cee96f5e09e07ba7e7b4ebf9bf167f4c3"
b"bf8039a4cab4bec068c899e997bca58672bd7686b5c85ea24841e48c46f76830390203"
b"010001a38201b6308201b2300f0603551d130101ff04053003010100301d0603551d25"
b"0416301406082b0601050507030106082b06010505070302300e0603551d0f0101ff04"
b"04030205a030330603551d1f042c302a3028a026a0248622687474703a2f2f63726c2e"
b"676f64616464792e636f6d2f676473312d38332e63726c30530603551d20044c304a30"
b"48060b6086480186fd6d010717013039303706082b06010505070201162b687474703a"
b"2f2f6365727469666963617465732e676f64616464792e636f6d2f7265706f7369746f"
b"72792f30818006082b0601050507010104743072302406082b06010505073001861868"
b"7474703a2f2f6f6373702e676f64616464792e636f6d2f304a06082b06010505073002"
b"863e687474703a2f2f6365727469666963617465732e676f64616464792e636f6d2f72"
b"65706f7369746f72792f67645f696e7465726d6564696174652e637274301f0603551d"
b"23041830168014fdac6132936c45d6e2ee855f9abae7769968cce730230603551d1104"
b"1c301a820d7777772e69646e65742e6e6574820969646e65742e6e6574301d0603551d"
b"0e041604144d3ae8a87ddcf046764021b87e7d8d39ddd18ea0300d06092a864886f70d"
b"01010505000382010100ad651b199f340f043732a71178c0af48e22877b9e5d99a70f5"
b"d78537c31d6516e19669aa6bfdb8b2cc7a145ba7d77b35101f9519e03b58e692732314"
b"1383c3ab45dc219bd5a584a2b6333b6e1bbef5f76e89b3c187ef1d3b853b4910e895a4"
b"57dbe7627e759f56c8484c30b22a74fb00f7b1d7c41533a1fd176cd2a2b06076acd7ca"
b"ddc6ca6d0c2a815f9eb3ef0d03d27e7eebd7824c78fdb51679c03278cfbb2d85ae65a4"
b"7485cb733fc1c7407834f7471ababd68f140983817c6f388b2f2e2bfe9e26608f9924f"
b"16473462d136427d1f2801e4b870b078c20ec4ba21e22ab32a00b76522d523825bcabb"
b"8c7b6142d624be8d2af69ecc36fb5689572a0f59c00000")
leaf_hash = (
b"7a395c866d5ecdb0cccb623e011dbc392cd348d1d1d72776174e127a24b09c78")
leaf_index = 848049
tree_size = 3630887
expected_root_hash = (
b"78316a05c9bcf14a3a4548f5b854a9adfcd46a4c034401b3ce7eb7ac2f1d0ecb")
def setUp(self):
self.verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
self.STH = namedtuple("STH", ["sha256_root_hash", "tree_size"])
self.ones = b"11" * 32
self.zeros = b"00" * 32
def test_verify_tree_consistency(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
for test_vector in MerkleVerifierTest.sha256_proofs:
self.assertTrue(verifier.verify_tree_consistency(*test_vector))
def test_verify_tree_consistency_always_accepts_empty_tree(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# Give some bogus proof too; it should be ignored.
self.assertTrue(verifier.verify_tree_consistency(
0, 1,
b"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"]
))
def test_verify_tree_consistency_for_equal_tree_sizes(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# Equal tree sizes and hashes, and a bogus proof that should be ignored.
self.assertTrue(verifier.verify_tree_consistency(
3, 3,
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"]
))
# Equal tree sizes but different hashes.
self.assertRaises(
error.ConsistencyError, verifier.verify_tree_consistency, 3, 3,
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01e",
b"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[])
def test_verify_tree_consistency_newer_tree_is_smaller(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
self.assertRaises(
ValueError, verifier.verify_tree_consistency, 5, 2,
b"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
b"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
[b"5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
b"bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"]
)
def test_verify_tree_consistency_proof_too_short(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
[b"0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
b"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0"]
)
def test_verify_tree_consistency_bad_second_hash(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in the second hash.
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604329",
[b"0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
b"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
b"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_verify_tree_consistency_both_hashes_bad(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in both hashes.
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ee",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604329",
[b"0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
b"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
b"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_verify_tree_consistency_bad_first_hash(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in the first hash.
self.assertRaises(
error.ConsistencyError, verifier.verify_tree_consistency, 6, 8,
b"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ee",
b"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
[b"0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
b"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
b"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_calculate_root_hash_good_proof(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
root_hash = verifier._calculate_root_hash_from_audit_path(
self.leaf_hash, self.leaf_index, self.sha256_audit_path[:],
self.tree_size)
self.assertEqual(root_hash, self.expected_root_hash)
def test_calculate_root_too_short_proof(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
leaf_index = self.leaf_index + int(
math.pow(2, len(self.sha256_audit_path) + 1))
self.assertRaises(
error.ProofError,
verifier._calculate_root_hash_from_audit_path,
self.leaf_hash, leaf_index, self.sha256_audit_path[:],
self.tree_size)
def test_verify_leaf_inclusion_good_proof(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.expected_root_hash, self.tree_size)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.raw_hex_leaf, self.leaf_index, self.sha256_audit_path,
sth))
def test_verify_leaf_inclusion_bad_proof(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# Expect this test to fail by providing an incorrect root hash.
sth = self.STH(self.zeros, self.tree_size)
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.raw_hex_leaf, self.leaf_index, self.sha256_audit_path, sth)
def test_verify_leaf_inclusion_incorrect_length_proof(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.zeros, 4)
# Too long a proof
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.ones, 0, [self.zeros, self.zeros, self.zeros], sth)
# Too short a proof
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.ones, 0, [self.zeros], sth)
def test_verify_leaf_inclusion_single_node_in_tree(self):
# If there is only one entry in the tree, the tree root hash should be
# equal to the leaf hash.
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.leaf_hash, 1)
self.assertTrue(
verifier.verify_leaf_inclusion(self.raw_hex_leaf, 0, [], sth))
def test_verify_leaf_inclusion_rightmost_node_in_tree(self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most node: In a tree of 8 nodes, ask for inclusion
# proof check for leaf 7.
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_c3 = hh.hash_children(self.zeros, h_s1)
h_c2 = hh.hash_children(self.zeros, h_c3)
h_root = hh.hash_children(self.zeros, h_c2)
sth = self.STH(h_root, 8)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 7, [self.zeros, self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_odd_tree(
self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most, even-indexed node: In a tree of 5 nodes, ask for
# inclusion proof check for leaf 4 (the 5th).
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_root = hh.hash_children(self.zeros, h_s1)
sth = self.STH(h_root, 5)
self.assertTrue(
verifier.verify_leaf_inclusion(self.ones, 4, [self.zeros, ], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_tree_odd_node(
self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most, odd-indexed node: In a tree of 6 nodes, ask for
# inclusion proof check for leaf 5 (the 6th).
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_l2 = hh.hash_children(self.zeros, h_s1)
h_root = hh.hash_children(self.zeros, h_l2)
sth = self.STH(h_root, 6)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 5, [self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_even_tree(
self):
# Show that verify_leaf_inclusion works when required to check a proof
        # for the right-most, even-indexed node: In a tree of 6 nodes, ask for
# inclusion proof check for leaf 4 (the 5th).
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_l2 = hh.hash_children(h_s1, self.zeros)
h_root = hh.hash_children(self.zeros, h_l2)
sth = self.STH(h_root, 6)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 4, [self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_throws_on_bad_indices(self):
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
sth = self.STH("", 6)
self.assertRaises(ValueError,
verifier.verify_leaf_inclusion, "", -3, [], sth)
negative_sth = self.STH("", -3)
self.assertRaises(ValueError,
verifier.verify_leaf_inclusion, "", 3, [],
negative_sth)
def test_verify_leaf_inclusion_all_nodes_all_tree_sizes_up_to_4(self):
leaves = ["aa", "bb", "cc", "dd"]
hh = HexTreeHasher()
leaf_hashes = [hh.hash_leaf(l) for l in leaves]
hc = hh.hash_children
proofs_per_tree_size = {
1: [[] ],
2: [[leaf_hashes[1]], [leaf_hashes[0]]],
3: [[leaf_hashes[1], leaf_hashes[2]], # leaf 0
[leaf_hashes[0], leaf_hashes[2]], # leaf 1
[hc(leaf_hashes[0], leaf_hashes[1])]], # leaf 2
4: [[leaf_hashes[1], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 0
[leaf_hashes[0], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 1
[leaf_hashes[3], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 2
[leaf_hashes[2], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 3
]
}
tree = compact_merkle_tree.CompactMerkleTree(hasher=HexTreeHasher())
verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
# Increase the tree by one leaf each time
for i in range(4):
tree.append(leaves[i])
tree_size = i + 1
# ... and check inclusion proof validates for each node
# of the tree
for j in range(tree_size):
proof = proofs_per_tree_size[tree_size][j]
sth = self.STH(tree.root_hash, tree_size)
self.assertTrue(
verifier.verify_leaf_inclusion(
leaves[j], j, proof, sth))
if __name__ == "__main__":
unittest.main()
|
|
import os
import os.path
import time
import itertools
import re
from collections import defaultdict
import pymysql
import pymysql.cursors
from collections import OrderedDict
pymysql.cursors.DictCursorMixin.dict_type = OrderedDict
# Matches INSERT ... VALUES statements so execute() can return lastrowid.
RE_INSERT = re.compile(r'(\s*INSERT\s.+\sVALUES\s+).+\Z', re.IGNORECASE)
class PerspectiveMatrix( object ):
def __init__( self, fields, rows ):
st = time.time()
self.fields = [ f.lower() for f in fields ]
self.pmatrix = defaultdict(dict)
self.kmatrix = defaultdict(set)
self.parser_fields()
self.usedtime = time.time() - st
self.addrows( rows )
def same_fields( self, fields ):
return bool( self.fields == [ f.lower() for f in fields ] )
def parser_fields( self ):
self.kpos = []
self.vpos = []
self.kcols = {}
self.vcols = {}
for i, f in enumerate(self.fields):
vfunc = self.is_valuecol( f.lower() )
if vfunc is None :
self.kcols[f] = i
self.kpos.append(i)
else :
self.vcols[f] = vfunc
self.vpos.append( ( i, f, vfunc ) )
return
def addrow( self, row ):
        # Index the row under every combination (power set) of key columns,
        # so any subset of key=value conditions can later be answered by a
        # single dictionary lookup.
        kc = [ tuple(sorted(comb)) for cr in range(len(self.kpos)+1) for comb in itertools.combinations(self.kpos, cr) ]
for comb in kc :
k = tuple( [ (ki, row[ki]) for ki in comb ] )
#print(k)
self.makeup( self.pmatrix[k], row )
for ki in self.kpos :
self.kmatrix[self.fields[ki]].add(row[ki])
return
def addrows( self, rows ):
st = time.time()
for row in rows :
self.addrow( row )
self.usedtime += (time.time()-st)
return
def makeup( self, a, b ):
if a == {} :
for vi, fn, vfunc in self.vpos :
a[fn] = b[vi]
return
for vi, fn, vfunc in self.vpos :
#a = vdict.get( fn )
#if a is None :
# vdict[fn] = b
# continue
a[fn] = vfunc( [a[fn], b[vi]] )
return
def is_valuecol( self, ki ):
if ki.startswith( 'max(' ) and ki.endswith( ')' ):
return max
if ki.startswith( 'min(' ) and ki.endswith( ')' ):
return min
if ki.startswith( 'sum(' ) and ki.endswith( ')' ):
return sum
return None
def __getitem__( self, conds ):
return self.get( conds )
def get( self, conds, defaultvalue=None ):
conds = tuple( sorted([ (self.kcols[k], v) for k, v in conds.items() ]) )
defaultvalue = {} if defaultvalue is None else defaultvalue
return self.pmatrix.get( conds, defaultvalue )
def get_keys( self, k ):
return sorted(list(self.kmatrix[k]))
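
# A minimal usage sketch (column names are illustrative). Plain columns act
# as pivot keys; columns wrapped in max()/min()/sum() are aggregated, and
# addrow() pre-indexes every combination of key conditions:
#
#     pm = PerspectiveMatrix(['city', 'product', 'sum(sales)'], rows)
#     pm.get({'city': 'NYC'})                     # {'sum(sales)': ...}
#     pm.get({'city': 'NYC', 'product': 'tea'})   # a single cell
#     pm.get({})                                  # grand totals
#     pm.get_keys('city')                         # sorted distinct cities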
class PerspectiveMatrixCursorMixin(object):
def _do_get_result(self):
super(PerspectiveMatrixCursorMixin, self)._do_get_result()
fields = []
if self.description:
for f in self._result.fields:
name = f.name
if name in fields:
name = f.table_name + '.' + name
fields.append(name)
self._fields = fields
def fetchall( self, pm = None ):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
        if pm is None :
return PerspectiveMatrix( self._fields, result )
if pm.same_fields( self._fields ) :
pm.addrows( result )
return pm
        raise Exception('fields are not equal')
class PerspectiveMatrixCursor(PerspectiveMatrixCursorMixin, pymysql.cursors.Cursor):
"""A cursor which returns results as PerspectiveMatrix"""
class Database(object):
conf = {}
default_dbargs = {
'charset' : 'utf8mb4',
'cursorclass' : pymysql.cursors.DictCursor,
'connect_timeout' : 3.0,
'autocommit' : True
}
@classmethod
def loadconfig( cls, filename="database.yaml" ):
if not os.path.exists( filename ) :
cls.conf = {}
return
import yaml
with open( filename, 'r') as fp :
            conf = yaml.safe_load(fp)
cls.conf = conf
return
def __init__( self, database=None, **kwargs ):
if database is None and 'host' not in kwargs:
            raise Exception('either a database name or a host argument is required.')
dbargs = self.default_dbargs.copy()
        if database is not None :
dbargs.update( self.conf[database] )
dbargs.update( kwargs )
self._dbargs = dbargs
self.makeconn()
return
def makeconn( self ):
self.conn = pymysql.connect(**self._dbargs)
return
def __call__( self, sql, args=() ):
return self.execute( sql, args )
def tuple( self, sql, args=() ):
return self.execute( sql, args, cursor=pymysql.cursors.Cursor )
def matrix( self, sql, args=(), pm=None ):
return self.execute( sql, args, cursor=PerspectiveMatrixCursor, pm=pm )
def execute( self, sql, args, cursor=None, pm=None ):
ee = None
        oe_retry = ( 2006, )  # MySQL error 2006: "server has gone away"
fetch_kwargs = {} if pm is None else {'pm':pm}
for i in range(int(self.conf['connection']['retrys'])):
#st = time.time()
try:
with self.conn.cursor( cursor ) as cursor:
cursor.execute( sql, args )
if RE_INSERT.match( sql ):
return cursor.lastrowid
return cursor.fetchall( **fetch_kwargs )
except pymysql.err.OperationalError as e :
ee = e
if e.args[0] in oe_retry :
self.makeconn()
finally :
pass
#usedtime = time.time() - st
#print( 'SQL used time:', usedtime )
raise ee
Database.loadconfig()
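
# Usage sketch (names are illustrative). database.yaml is expected to map
# section names to pymysql.connect() arguments, plus a top-level
# `connection: {retrys: N}` entry that execute() reads for its retry loop:
#
#     db = Database('main')                          # section of database.yaml
#     rows = db('SELECT * FROM t WHERE a=%s', (1,))  # OrderedDict rows
#     pm = db.matrix('SELECT city, product, sum(x) FROM t '
#                    'GROUP BY city, product')       # PerspectiveMatrix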
|
|
"""
jqueue Protocol
---------------
The jqueue protocol consists of 6 different types of messages, which have a
particular wire protocol as well as a logical form - this module implements
both the wire protocol as well as the logical form, so that messages can not
only be serialized and deserialized, but also manipulated as Python
objects.
These types of messages are:
- RequestJob(ttl)
- Job(id, data)
- Ping(id)
- SubmitResult(id, data)
- Ok()
- Error(type)
"""
from collections import namedtuple
from enum import Enum
import struct
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
from jqueue import utils
RequestJob = namedtuple('RequestJob', ['ttl'])
Job = namedtuple('Job', ['id', 'data'])
Ping = namedtuple('Ping', ['id'])
SubmitResult = namedtuple('SubmitResult', ['id', 'data'])
Ok = namedtuple('Ok', [])
Error = namedtuple('Error', ['type'])
class Errors(Enum):
"""
The types of errors that the protocol is capable of representing.
"""
NO_JOB_AVAIL = 1
TTL_TOO_HIGH = 2
NOT_RESERVED = 3
DO_NOT_UNDERSTAND = 4
class MessageTypes(Enum):
"""
The types of messages in the protocol.
"""
RequestJob = 1
Job = 2
Ping = 3
SubmitResult = 4
Ok = 5
Error = 6
@singledispatch
def serialize(message):
raise ValueError('Cannot serialize a {}'.format(message))
@serialize.register(RequestJob)
def _(message):
"""
Job requests are serialized according to the following format:
<Type> <TTL>
byte 32-bit unsigned integer
"""
return struct.pack('<bI', MessageTypes.RequestJob.value, message.ttl)
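# Worked example: serialize(RequestJob(ttl=30)) packs type byte 0x01 plus a
# little-endian 32-bit 30, i.e. b"\x01\x1e\x00\x00\x00".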
@serialize.register(Job)
def _(message):
"""
Job replies are serialized according to the following format:
<Type> <ID> <Data>
byte netstring netstring
"""
if not isinstance(message.id, bytes) or not message.id:
        raise ValueError('{}.id must be a nonempty bytes object'.format(message))
if not isinstance(message.data, bytes) or not message.data:
raise ValueError('{}.data must be a nonempty bytes object'.format(message))
return (struct.pack('<b', MessageTypes.Job.value) +
utils.to_netstring(message.id) + utils.to_netstring(message.data))
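# Sketch, assuming utils.to_netstring emits standard netstrings
# ("<length>:<bytes>,"): serialize(Job(id=b"j1", data=b"xy")) would yield
# b"\x02" + b"2:j1," + b"2:xy,".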
@serialize.register(Ping)
def _(message):
"""
Ping messages are serialized according to the following format:
<Type> <ID>
byte netstring
"""
if not isinstance(message.id, bytes) or not message.id:
        raise ValueError('{}.id must be a nonempty bytes object'.format(message))
return struct.pack('<b', MessageTypes.Ping.value) + utils.to_netstring(message.id)
@serialize.register(SubmitResult)
def _(message):
"""
Submission messages are serialized according to the following format:
<Type> <ID> <Data>
byte netstring netstring
"""
if not isinstance(message.id, bytes) or not message.id:
raise ValueError('{}.id must be nonempty a bytes object'.format(message))
if not isinstance(message.data, bytes) or not message.data:
raise ValueError('{}.data must be a nonempty bytes object'.format(message))
return (struct.pack('<b', MessageTypes.SubmitResult.value) +
utils.to_netstring(message.id) + utils.to_netstring(message.data))
@serialize.register(Ok)
def _(message):
"""
Ok messages are serialized according to the following format:
<Type>
byte
"""
return struct.pack('<b', MessageTypes.Ok.value)
@serialize.register(Error)
def _(message):
"""
Error messages are serialized according to the following format:
<Type> <Error>
byte byte
"""
return struct.pack('<bb', MessageTypes.Error.value, message.type.value)
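# Worked example: serialize(Error(Errors.NO_JOB_AVAIL)) == b"\x06\x01"
# (message type 6, error code 1).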
def message_from_bytes(bytestring):
"""
Decodes a bytestring into a message, returning the decoded message as well
as any bytes left over after the decoding. Note that if the given data is
    not enough to decode a complete message, then ``(None, bytestring)`` is
returned.
"""
# If we fail at decoding at any point, then this is what is returned
fail_return = (None, bytestring)
if not bytestring:
return fail_return
try:
msg_type = MessageTypes(bytestring[0])
except ValueError:
# The message byte is invalid, and we can't do anything that will make
# it valid in the future, since it is corrupted somehow.
raise ValueError('Incorrect header byte in message')
payload = bytestring[1:]
if msg_type == MessageTypes.RequestJob:
if len(payload) < 4:
return fail_return
(ttl,) = struct.unpack('<I', payload[:4])
return RequestJob(ttl), payload[4:]
elif msg_type == MessageTypes.Job:
id, payload_rest = utils.netstring_from_buffer(payload)
if not id:
return fail_return
data, rest = utils.netstring_from_buffer(payload_rest)
if not data:
return fail_return
return Job(id, data), rest
elif msg_type == MessageTypes.Ping:
id, rest = utils.netstring_from_buffer(payload)
if not id:
return fail_return
return Ping(id), rest
elif msg_type == MessageTypes.SubmitResult:
id, payload_rest = utils.netstring_from_buffer(payload)
if not id:
return fail_return
data, rest = utils.netstring_from_buffer(payload_rest)
if not data:
return fail_return
return SubmitResult(id, data), rest
elif msg_type == MessageTypes.Ok:
return Ok(), payload
elif msg_type == MessageTypes.Error:
if len(payload) < 1:
return fail_return
return Error(Errors(payload[0])), payload[1:]
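
if __name__ == "__main__":
    # Minimal round-trip sketch (assumes Python 3 bytes indexing): two
    # messages share one buffer, and message_from_bytes hands back the
    # unconsumed tail after each decode.
    buf = serialize(RequestJob(ttl=30)) + serialize(Ok())
    msg, rest = message_from_bytes(buf)
    assert msg == RequestJob(ttl=30)
    msg, rest = message_from_bytes(rest)
    assert msg == Ok() and rest == b""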
|
|
"""cl.actors"""
from __future__ import absolute_import, with_statement
import sys
import traceback
from itertools import count
from operator import itemgetter
from kombu import Consumer, Exchange, Queue
from kombu.common import (collect_replies, ipublish, isend_reply,
maybe_declare, uuid)
from kombu.log import Log
from kombu.pools import producers
from kombu.utils import kwdict, reprcall, reprkwargs
from kombu.utils.encoding import safe_repr
from . import __version__
from . import exceptions
from .results import AsyncResult
from .utils import cached_property, shortuuid
__all__ = ["Actor"]
builtin_fields = {"ver": __version__}
class ActorType(type):
def __repr__(self):
name = self.name
if not name:
try:
name = self.__name__
except AttributeError:
name = self.__class__.__name__
return "<@actor: %s>" % (name, )
class Actor(object):
__metaclass__ = ActorType
AsyncResult = AsyncResult
Error = exceptions.clError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = "persistent"
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = ("direct", )
#: Default serializer used to send messages and reply messages.
serializer = "json"
    #: Default timeout in seconds as a float after which
    #: we give up waiting for replies.
    default_timeout = 10.0
    #: Time in seconds as a float after which replies expire.
    reply_expires = 100.0
    #: Exchange used for replies.
reply_exchange = Exchange("cl.reply", "direct")
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {"max_retries": 100,
"interval_start": 0,
"interval_max": 1,
"interval_step": 0.2}
#: returns the next anonymous ticket number
#: used for identifying related logs.
next_anon_ticket = count(1).next
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {"rr": "__rr__",
"round-robin": "__rr__",
"scatter": "__scatter__"}
meta = {}
class state:
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.exchange = exchange or self.exchange
self.agent = agent
self.type_to_queue = {"direct": self.get_direct_queue,
"round-robin": self.get_rr_queue,
"scatter": self.get_scatter_queue}
if self.default_fields is None:
self.default_fields = {}
if not self.exchange:
self.exchange = Exchange("cl.%s" % (self.name, ), "direct",
auto_delete=True)
logger_name = self.name
if self.agent:
logger_name = "%s#%s" % (self.name, shortuuid(self.agent.id, ))
self.log = Log("!<%s>" % (logger_name, ), logger=logger)
self.state = self.contribute_to_state(self.construct_state())
self.setup()
def setup(self):
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def maybe_setattr(self, obj, attr, value):
if not hasattr(obj, attr):
setattr(obj, attr, value)
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in map.iteritems():
self.maybe_setattr(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
return self.contribute_to_object(state, {
"actor": self,
"agent": self.agent,
"log": self.log,
"Next": self.Next,
"NoRouteError": self.NoRouteError,
"NoReplyError": self.NoReplyError})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type="round-robin",
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def scatter(self, method, args={}, nowait=False, **kwargs):
"""Broadcast method to all agents.
        In this context the reply limit is disabled, and the timeout
        is set to 2 seconds by default, which means we collect all the
        replies that managed to be sent within the requested timeout.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the replies.
"""
kwargs.setdefault("timeout", 2)
r = self.call_or_cast(method, args, type="scatter",
nowait=nowait, **kwargs)
if not nowait:
return r.gather(**kwargs)
def get_default_scatter_limit(self):
if self.agent:
return self.agent.get_default_scatter_limit(self.name)
return None
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:keyword args: Dictionary of arguments for the method.
        :keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
          :attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a unique queue that can be used to listen for messages
to this class."""
return Queue(self.id, self.exchange, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
return Queue("%s.%s.scatter" % (self.name, self.id), self.exchange,
routing_key=self.type_to_rkey["scatter"],
auto_delete=True)
def get_rr_queue(self):
return Queue(self.exchange.name + ".rr", self.exchange,
routing_key=self.type_to_rkey["round-robin"],
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
"x-expires": int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor."""
kwargs.setdefault("no_ack", self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def _publish(self, body, producer, before=None, **props):
if before is not None:
before(producer.connection, producer.channel)
maybe_declare(props["exchange"], producer.channel)
return producer.publish(body, **props)
def cast(self, method, args={}, before=None, retry=None,
retry_policy=None, type=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {"class": self.name, "method": method, "args": args}
exchange = self.exchange
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type:
props.setdefault("routing_key", self.type_to_rkey[type])
props.setdefault("routing_key", self.default_routing_key)
props.setdefault("serializer", self.serializer)
        props = dict(props, exchange=exchange, before=before)
        ipublish(producers[self._connection], self._publish,
                 (body, ), props, **(_retry_policy if retry else {}))
def call(self, method, args={}, retry=False, retry_policy=None, **props):
"""Send message to actor and return :class:`AsyncResult`."""
ticket = uuid()
reply_q = self.get_reply_queue(ticket)
def before(connection, channel):
reply_q(channel).declare()
self.cast(method, args, before,
**dict(props, reply_to=ticket))
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties["reply_to"])
except self.Next:
# don't reply, delegate to other agent.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
return isend_reply(producers[self._connection],
self.reply_exchange, req, body, props)
def on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get("reply_to"):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault("timeout", self.default_timeout)
if "limit" not in kwargs:
kwargs["limit"] = self.get_default_scatter_limit()
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith("_"):
raise KeyError(method)
return method
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{"ok": return_value, **default_fields}
If the method raised an exception the return value
will be::
{"nok": [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = "%s" % (shortuuid(ticket), )
else:
ticket = sticket = str(self.next_anon_ticket())
try:
method, args = itemgetter("method", "args")(body)
self.log.info("#%s --> %s",
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {"ok": act(**kwdict(args or {}))}
self.log.info("#%s <-- %s", sticket, reprkwargs(r))
except self.Next:
raise
except Exception, exc:
einfo = sys.exc_info()
r = {"nok": [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error("#%s <-- nok=%r", sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return "".join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return "%s.%s" % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError("Actor is not bound to a connection.")
return self.connection
@cached_property
def _default_fields(self):
return dict(builtin_fields, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
return self.agent.id
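

# --- Usage sketch (illustrative only) ---
# A minimal sketch of the RPC surface above, assuming the actor base class
# defined earlier in this module is importable as ``Actor``; the ``Adder``
# actor, its ``state`` methods and the connection object are hypothetical.
#
#     class Adder(Actor):
#         class state:
#             def add(self, x, y):
#                 return x + y
#
#     adder = Adder(connection).bind(connection)
#     result = adder.call('add', {'x': 2, 'y': 2})      # AsyncResult
#     print result.get(timeout=2)                       # block for the reply
#     adder.cast('add', {'x': 1, 'y': 1})               # fire-and-forget
#     replies = adder.scatter('add', {'x': 1, 'y': 1})  # one reply per agent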
|
|
import os, sys
import time
from ConfigParser import ConfigParser
import boto
from boto import ec2
from boto.ec2 import elb
from boto.ec2.connection import EC2Connection
from boto.ec2.elb import HealthCheck
from fabric.api import env, execute, local
from fabric.tasks import Task
from fab_deploy import functions
from utils import get_security_group
DEFAULT_AMI = 'ami-5965401c' # ubuntu 12.04 x86_64
DEFAULT_INSTANCE_TYPE = 'm1.medium'
DEFAULT_REGION = 'us-west-1'
def get_ec2_connection(server_type, **kwargs):
"""
Create and return a valid connection to AWS.
    To establish a valid connection, aws_access_key and aws_secret_key have to
    be defined in a file specified by env.AWS_CREDENTIAL, with a format similar
    to the server.ini file. You should define env.AWS_CREDENTIAL in your fabfile.
    By default, this function looks in $PROJECT_DIR/deploy/amazon.ini for the
    credential information; this file should have a section named 'amazon-aws'
    containing lines that define aws_access_key and aws_secret_key, like below:
[amazon-aws]
aws_access_key =
aws_secret_key =
"""
amzn = env.get('AWS_CREDENTIAL',
os.path.join(env.deploy_path, 'amazon.ini'))
if not os.path.exists(amzn):
print ("Cannot find environment variable AMAZON_CREDENTIALS which should"
" point to a file with your aws_access_key and aws_secret_key info"
" inside. You may specify it through your fab env.")
sys.exit(1)
parser = ConfigParser()
parser.read(amzn)
aws_access_key = parser.get('amazon-aws', 'aws_access_key')
aws_secret_key = parser.get('amazon-aws', 'aws_secret_key')
if not aws_access_key or not aws_secret_key:
print "You must specify your amazon aws credentials to your env."
sys.exit(1)
region = kwargs.get('region', env.get('region'))
if not region:
region = DEFAULT_REGION
if server_type == 'ec2':
conn = ec2.connect_to_region(region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
return conn
elif server_type == 'elb':
conn = elb.connect_to_region(region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
return conn
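

# Usage sketch (illustrative): server_type selects between the plain EC2 API
# and the ELB API; both assume env.AWS_CREDENTIAL points at a valid file.
#
#     ec2_conn = get_ec2_connection('ec2', region='us-west-1')
#     elb_conn = get_ec2_connection('elb')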
class CreateKeyPair(Task):
"""
Create an AWS key pair.
This task should be run before you try to add any type of server, because
task api.add_server will look for the key pair on your local machine.
AWS requires a key pair to create EC2 instances, and the same key file is
    needed to log in to the instances. This task creates a key pair, and
    saves its content in a file located in the same directory as the
    env.AWS_CREDENTIAL file. The key name and file location will be registered
    in the file specified by env.AWS_CREDENTIAL.
    You are responsible for keeping the file in a secure place and never
    losing it. Decide for yourself whether to push the key file to a remote
    repo or let git ignore it.
"""
name = 'create_key'
serial = True
section = 'amazon-aws'
def run(self, **kwargs):
conn = get_ec2_connection(server_type='ec2', **kwargs)
sys.stdout.write("Please give a name to the key: ")
amzn = env.get('AWS_CREDENTIAL',
os.path.join(env.deploy_path, 'amazon.ini'))
key_dir = os.path.dirname(amzn)
while True:
key_name = raw_input()
key_file = os.path.join(key_dir, key_name+'.pem')
key = conn.get_key_pair(key_name)
if key:
if os.path.exists(key_file):
print ("Looks like key file %s already exists on your "
"machine. I will skip creating, and just use it."
%key_file)
break
else:
print ("Key '%s' already exist on AWS, but I couldn't "
"find it at %s. We need to create a new key, please"
"give a name to the key: " %(key.name, key_file))
continue
else:
key = conn.create_key_pair(key_name)
key.save(key_dir)
break
parser = ConfigParser()
parser.read(amzn)
if not parser.has_section(self.section):
parser.add_section(self.section)
parser.set(self.section, 'ec2-key-name', key.name)
parser.set(self.section, 'ec2-key-file', key_file)
fp = open(amzn, 'w')
parser.write(fp)
fp.close()
local('ssh-add %s' %key_file)
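

# After a successful run of create_key the credential file gains entries
# like the following (values illustrative):
#
#     [amazon-aws]
#     aws_access_key = <your access key>
#     aws_secret_key = <your secret key>
#     ec2-key-name = mykey
#     ec2-key-file = /path/to/deploy/mykey.pem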
class New(Task):
"""
    Provision and set up a new Amazon AWS EC2 instance.
    This task reads in a number of variables defining the properties of the
    EC2 instance, and creates it. If the instance is created successfully,
    the task prints its properties and sets it up as a certain type of
    server by executing another task named setup.***.
    You may provide the following parameters through the command line.
    * **type**: Required. Server type; can be db_server, app_server,
      dev_server, or slave_db
    * **region**: default is us-west-1
    * **ami_id**: AMI ID
    * **static_ip**: Set to true to allocate and associate an Elastic IP.
      By default this is not used.
"""
name = 'add_server'
serial = True
def run(self, **kwargs):
assert not env.hosts
conn = get_ec2_connection(server_type='ec2', **kwargs)
type = kwargs.get('type')
setup_name = 'setup.%s' % type
instance_type = DEFAULT_INSTANCE_TYPE
ami_id = kwargs.get('ami_id')
if not ami_id:
ami_id = DEFAULT_AMI
task = functions.get_task_instance(setup_name)
if task:
if hasattr(task, 'instance_type'):
instance_type = task.instance_type
if hasattr(task, 'ami'):
ami_id = task.ami
else:
print "I don't know how to add a %s server" % type
sys.exit(1)
amzn = env.get('AWS_CREDENTIAL',
os.path.join(env.deploy_path, 'amazon.ini'))
parser = ConfigParser()
parser.read(amzn)
key_name = parser.get('amazon-aws', 'ec2-key-name')
key_file = parser.get('amazon-aws', 'ec2-key-file')
if not key_name:
print "Sorry. You need to create key pair with create_key first."
sys.exit(1)
elif not os.path.exists(key_file):
print ("I find key %s in server.ini file, but the key file is not"
" on its location %s. There is something wrong. Please fix "
"it, or recreate key pair" % (key_name, key_file))
sys.exit(1)
image = conn.get_image(ami_id)
security_group = get_security_group(conn, task.config_section)
name = functions.get_remote_name(None, task.config_section,
name=kwargs.get('name'))
SERVER = {
'image_id': image.id,
'instance_type': instance_type,
'security_groups': [security_group],
'key_name': key_name,}
reservation = conn.run_instances(**SERVER)
print reservation
instance = reservation.instances[0]
while instance.state != 'running':
time.sleep(5)
instance.update()
print "...instance state: %s" % (instance.state)
conn.create_tags([instance.id], {"Name": name})
if not kwargs.get('static_ip', False):
ip = instance.ip_address
else:
elastic_ip = conn.allocate_address()
print "...Elastic IP %s allocated" % elastic_ip
elastic_ip.associate(instance.id)
ip = elastic_ip.public_ip
print "...EC2 instance is successfully created."
print "...wait 5 seconds for the server to be ready"
print "...while waiting, you may want to note down the following info"
time.sleep(5)
print "..."
print "...Instance using image: %s" % image.name
print "...Added into security group: %s" %security_group.name
print "...Instance ID: %s" % instance.id
print "...Public IP: %s" % ip
host_string = 'ubuntu@%s' % instance.public_dns_name
execute(setup_name, name=name, hosts=[host_string])
create_key = CreateKeyPair()
add_server = New()
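

# --- Typical command-line flow (illustrative) ---
# The tasks registered above are normally driven through fab, e.g.:
#
#     fab create_key
#     fab add_server:type=app_server,region=us-west-1,static_ip=true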
|
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from osc_lib.tests import utils
from tripleoclient import constants
from tripleoclient.tests import fakes
from tripleoclient.v1 import overcloud_backup
from unittest.mock import call
class TestOvercloudBackup(utils.TestCommand):
def setUp(self):
super(TestOvercloudBackup, self).setUp()
# Get the command object to test
app_args = mock.Mock()
app_args.verbose_level = 1
self.app.options = fakes.FakeOptions()
self.cmd = overcloud_backup.BackupOvercloud(self.app, app_args)
self.app.client_manager.workflow_engine = mock.Mock()
self.workflow = self.app.client_manager.workflow_engine
self.inventory = '/tmp/test_inventory.yaml'
        # create an empty inventory file for the tests
        open(self.inventory, 'w').close()
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_noargs(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = []
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='cli-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_create_recover_image',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_init(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--init'
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_init_nfs(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--init',
'nfs'
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-nfs-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_nfs_server',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_setup_nfs(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--setup-nfs'
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-nfs-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_nfs_server',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_setup_rear(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--setup-rear',
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_setup_rear_ironic(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--setup-ironic',
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='cli-overcloud-conf-ironic.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True)
def test_overcloud_backup_setup_nfs_rear_with_inventory(self,
mock_playbook):
arglist = [
'--setup-nfs',
'--setup-rear',
'--inventory',
self.inventory
]
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
calls = [call(workdir=mock.ANY,
playbook='prepare-nfs-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_nfs_server',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}),
call(workdir=mock.ANY,
playbook='prepare-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={})]
mock_playbook.assert_has_calls(calls)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_setup_rear_extra_vars_inline(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--setup-rear',
'--extra-vars',
'{"tripleo_backup_and_restore_nfs_server": "192.168.24.1"}'
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
extra_vars_dict = {
'tripleo_backup_and_restore_nfs_server': '192.168.24.1'
}
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars=extra_vars_dict
)
@mock.patch('os.path.isfile')
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_setup_rear_with_extra_vars(self,
mock_playbook,
mock_access,
mock_isfile):
arglist = [
'--setup-rear',
'--extra-vars',
'/tmp/test_vars.yaml'
]
verifylist = []
mock_isfile.return_value = True
mock_access.return_value = True
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='prepare-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_setup_rear',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars='/tmp/test_vars.yaml'
)
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_inventory(self, mock_playbook):
arglist = [
'--inventory',
self.inventory
]
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
mock_playbook.assert_called_once_with(
workdir=mock.ANY,
playbook='cli-overcloud-backup.yaml',
inventory=parsed_args.inventory,
tags='bar_create_recover_image',
skip_tags=None,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=3,
extra_vars={}
)
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_no_inventory(self, mock_playbook):
arglist = [
'--inventory',
'/tmp/no_inventory.yaml'
]
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaisesRegex(
RuntimeError,
'The inventory file',
self.cmd.take_action,
parsed_args)
@mock.patch('os.access')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_overcloud_backup_no_readable_inventory(self,
mock_playbook,
mock_access):
arglist = [
'--inventory',
self.inventory
]
verifylist = []
mock_access.return_value = False
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaisesRegex(
RuntimeError,
'The inventory file',
self.cmd.take_action,
parsed_args)
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""paramgmt: a library for executing commands to a cluster in parallel."""
# Python 3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import subprocess
import sys
import threading
try:
from termcolor import colored
CAN_COLOR = True
except ImportError:
CAN_COLOR = False
# defaults declared here for uses in binaries
USER_DEFAULT = None
PARALLEL_DEFAULT = True
QUIET_DEFAULT = False
COLOR_DEFAULT = True
ATTEMPTS_DEFAULT = 3
# error message from SSH indicating that it couldn't connect
SSH_ERROR_MSGS = [
'Connection timed out during banner exchange',
'ssh_exchange_identification: Connection closed by remote host',
'ssh_exchange_identification: read: Connection reset by peer']
def _should_color(want_to_color):
"""This function turns 'want_to_color' into 'should_color'."""
return want_to_color and CAN_COLOR and sys.stdout.isatty()
def all_success(mgmt_commands):
"""Determines if all child processes were successful.
Args:
mgmt_commands : A list of all Command objects
Returns:
True if all child processes succeeded
"""
for mgmt_command in mgmt_commands:
if mgmt_command.retcode != 0:
return False
return True
def parse_file(filename):
"""This function parses a file to generate a list of lines.
This function wraps the parse_stream() function by opening
the specified file first.
Args:
filename : The name of the file to be parsed
Returns:
A list of lines.
"""
fd = open(filename, 'r')
lines = parse_stream(fd)
fd.close()
return lines
def parse_stream(stream):
"""This function parses the contents of a stream to generate a list of lines.
    This function removes comments delimited by '#', ignores empty
    lines, and strips leading and trailing whitespace.
Args:
stream : A file that has been open()'d
Returns:
A list of lines.
"""
lines = []
for line in stream:
idx = line.find('#')
if idx >= 0:
line = line[:idx]
line = line.strip()
if line:
lines.append(line.strip())
return lines
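

# Example (illustrative): given a stream whose contents are
#
#     host1  # rack 1
#
#     host2
#
# parse_stream() returns ['host1', 'host2']; parse_file() does the same for
# a file on disk.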
class Controller(object):
"""This class offers parallel cluster management using SSH and SCP."""
def __init__(self, hosts, user=USER_DEFAULT, parallel=PARALLEL_DEFAULT,
quiet=QUIET_DEFAULT, color=COLOR_DEFAULT,
attempts=ATTEMPTS_DEFAULT):
"""Constructor for Controller.
Args:
hosts : A list of hostnames.
user : The remote user account.
parallel : Run commands in parallel.
quiet : Suppress printing output to stdout.
      color : Color the output. Only enabled if sys.stdout.isatty() is true
        and termcolor was successfully imported.
attempts : Maximum number of process tries.
"""
self._user = user
self._hosts = hosts
self._parallel = parallel
self._quiet = quiet
self._color = _should_color(color)
self._attempts = int(attempts)
self._ssh_connect_timeout = 2
self._ssh_connection_attempts = 3
@property
def user(self):
return self._user
@user.setter
def user(self, val):
self._user = val
@property
def parallel(self):
return self._parallel
@parallel.setter
def parallel(self, val):
self._parallel = val
@property
def quiet(self):
return self._quiet
@quiet.setter
def quiet(self, val):
self._quiet = val
@property
def color(self):
return self._color
@color.setter
    def color(self, val, force=False):
        if val and force:
            if not CAN_COLOR:
                raise EnvironmentError('package \'termcolor\' does not exist')
            self._color = True
            return  # force color even when stdout is not a tty
        self._color = _should_color(val)
@property
def attempts(self):
return self._attempts
@attempts.setter
def attempts(self, val):
self._attempts = int(val)
@property
def ssh_connect_timeout(self):
return self._ssh_connect_timeout
@ssh_connect_timeout.setter
def ssh_connect_timeout(self, val):
self._ssh_connect_timeout = int(val)
@property
def ssh_connection_attempts(self):
return self._ssh_connection_attempts
@ssh_connection_attempts.setter
def ssh_connection_attempts(self, val):
self._ssh_connection_attempts = int(val)
def _ssh_options(self):
return ['-o', 'PasswordAuthentication=no',
'-o', 'ConnectTimeout={0}'
.format(self._ssh_connect_timeout),
'-o', 'ConnectionAttempts={0}'
.format(self._ssh_connection_attempts)]
def _run_commands(self, mgmt_commands):
"""This runs the specified commands.
Args:
mgmt_commands : A list of Commands
Returns:
Nothing, but it completes mgmt_command objects
"""
# run all commands
outstanding = []
failed = []
for mgmt_command in mgmt_commands:
mgmt_command.start()
if self._parallel:
outstanding.append(mgmt_command)
else:
mgmt_command.join()
if not self._quiet:
print(mgmt_command.status(self._color))
                if mgmt_command.retcode != 0:
failed.append(mgmt_command)
for mgmt_command in outstanding:
mgmt_command.join()
if not self._quiet:
print(mgmt_command.status(self._color))
            if mgmt_command.retcode != 0:
failed.append(mgmt_command)
# show stats
if not self._quiet:
total = len(mgmt_commands)
failures = len(failed)
successes = total - failures
print(('{0} succeeded, {1} failed, {2} total\n'
.format(successes, failures, total)))
if failures > 0:
print('Failed hosts:')
for mgmt_command in failed:
host = mgmt_command.host
if self._color:
host = colored(host, 'red')
print(host)
def local_command(self, commands):
"""Run local command for all hosts specified.
Args:
commands : The local commands.
'?HOST' is replaced with actual hostname.
Returns:
A list of Command objects.
"""
# create a list of Commands for _run_commands()
mgmt_commands = []
for host in self._hosts:
command = []
for c in commands:
command.append(c.replace('?HOST', host))
command = ' '.join(command)
mgmt_command = Command(
host, ['/bin/sh'], self._attempts,
'lcmd [{0}]: {1}'.format(host, command),
command)
mgmt_commands.append(mgmt_command)
# run all commands
self._run_commands(mgmt_commands)
return mgmt_commands
def remote_command(self, commands):
"""Run SSH command to all hosts specified.
Args:
commands : The remote commands of the SSH command.
'?HOST' is replaced with actual hostname.
Returns:
A list of Command objects.
"""
# create a list of Commands for _run_commands()
mgmt_commands = []
for host in self._hosts:
command = ['ssh']
command.extend(self._ssh_options())
if self._user:
rspec = '{0}@{1}'.format(self._user, host)
else:
rspec = '{0}'.format(host)
desc = 'rcmd [{0}]:'.format(rspec)
command.append(rspec)
for c in commands:
tmp = c.replace('?HOST', host)
command.append(tmp)
desc += ' {0}'.format(tmp)
mgmt_command = Command(host, command, self._attempts, desc)
mgmt_commands.append(mgmt_command)
# run all commands
self._run_commands(mgmt_commands)
return mgmt_commands
def remote_push(self, local, remote):
"""Push specified documents to all remote hosts via SCP.
Args:
local : A list of local file(s) and/or directory(ies)
'?HOST' is replaced with actual hostname.
remote : A string specification of the remote destination file(s).
'?HOST' is replaced with actual hostname.
Returns:
A list of Command objects.
"""
# create a list of Commands for _run_commands()
mgmt_commands = []
for host in self._hosts:
command = ['scp', '-r']
command.extend(self._ssh_options())
if self._user:
rspec = '{0}@{1}'.format(self._user, host)
else:
rspec = '{0}'.format(host)
desc = 'rpush [{0}]: '.format(rspec)
for ll in local:
tmp = ll.replace('?HOST', host)
command.append(tmp)
desc += '{0} '.format(tmp)
desc += '=> '
tmp = '{0}:{1}'.format(rspec, remote.replace('?HOST', host))
command.append(tmp)
desc += tmp
mgmt_command = Command(host, command, self._attempts, desc)
mgmt_commands.append(mgmt_command)
# run all commands
self._run_commands(mgmt_commands)
return mgmt_commands
def remote_pull(self, remote, local):
"""Push specified documents to all remote hosts via SCP.
Args:
remote : A list of remote file(s) and/or directory(ies)
'?HOST' is replaced with actual hostname.
local : A string specification of the local destination file(s).
'?HOST' is replaced with actual hostname.
Returns:
A list of Command objects.
"""
# create a list of Commands for _run_commands()
mgmt_commands = []
for host in self._hosts:
command = ['scp', '-r']
command.extend(self._ssh_options())
if self._user:
rspec = '{0}@{1}'.format(self._user, host)
else:
rspec = '{0}'.format(host)
desc = 'rpull [{0}]: '.format(rspec)
remote2 = ''
for idx, rr in enumerate(remote):
remote2 += rr.replace('?HOST', host)
if idx < (len(remote) - 1):
remote2 += ','
if len(remote) > 1:
remote2 = '{{{0}}}'.format(remote2)
tmp = '{0}:{1}'.format(rspec, remote2)
command.append(tmp)
desc += tmp
desc += ' => '
tmp = local.replace('?HOST', host)
command.append(tmp)
desc += tmp
mgmt_command = Command(host, command, self._attempts, desc)
mgmt_commands.append(mgmt_command)
# run all commands
self._run_commands(mgmt_commands)
return mgmt_commands
def remote_script(self, scripts):
"""Run local scripts on remote hosts via SSH.
Args:
scripts : a list of local scripts to be run on the remote hosts.
'?HOST' in script names is replaced with actual hostname.
'?HOST' in the script is replaced with actual hostname.
Returns:
A list of Command objects.
"""
# create a list of Commands for _run_commands()
mgmt_commands = []
for host in self._hosts:
command = ['ssh', '-T']
command.extend(self._ssh_options())
if self._user:
rspec = '{0}@{1}'.format(self._user, host)
else:
rspec = '{0}'.format(host)
desc = 'rscript [{0}]: '.format(rspec)
command.append(rspec)
# read in the text of the scripts
script_names = []
all_script = ''
for script in scripts:
script_name = script.replace('?HOST', host)
script_names.append(script_name)
with open(script_name, 'r') as fd:
all_script += fd.read()
if all_script[-1:] != '\n':
all_script += '\n'
if not all_script:
all_script = ':'
# format description and command
desc += 'running {0}'.format(' '.join(script_names))
mgmt_command = Command(host, command, self._attempts, desc,
all_script.replace('?HOST', host))
mgmt_commands.append(mgmt_command)
# run all commands
self._run_commands(mgmt_commands)
return mgmt_commands
class Command(threading.Thread):
"""A container class for commands given to Controller."""
def __init__(self, host, commands, max_attempts, description=None,
stdin=None):
"""Constructor for Command."""
threading.Thread.__init__(self)
self.host = host
self.commands = commands
if description is not None:
self.description = description
else:
self.description = self.commands
self.attempts = 0
self.max_attempts = max_attempts
self.process = None
self.retcode = None
if stdin:
self.stdin = stdin.encode('utf-8')
else:
self.stdin = None
self.stdout = None
self.stderr = None
def run(self):
"""Runs the command, called by threading library."""
while self.attempts < self.max_attempts:
# attempt to run the process
self.attempts += 1
if self.stdin:
stdin_fd = subprocess.PIPE
else:
stdin_fd = None
self.process = subprocess.Popen(self.commands,
stdin=stdin_fd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = self.process.communicate(input=self.stdin)
self.retcode = self.process.returncode
self.stdout = out.decode('utf-8')
self.stdout = self.stdout.rstrip('\n')
self.stderr = err.decode('utf-8')
self.stderr = self.stderr.rstrip('\n')
self.process = None
if self.retcode != 0:
ssh_error = False
for msg in SSH_ERROR_MSGS:
if self.stderr.startswith(msg):
ssh_error = True
break
if ssh_error:
continue
else:
break
else:
break
def status(self, color=True):
"""This displays the result of the command.
Args:
color : whether or not to color the output
"""
color = _should_color(color)
text = []
if color:
text.append('{0}'.format(colored(self.description, 'blue')))
else:
text.append('{0}'.format(self.description))
if self.stdout:
if color:
text.append('stdout:\n{0}'.format(colored(self.stdout, 'green')))
else:
text.append('stdout:\n{0}'.format(self.stdout))
if self.stderr:
if color:
                if self.retcode != 0:
text.append('stderr:\n{0}'.format(colored(self.stderr, 'red')))
else:
text.append('stderr:\n{0}'.format(colored(self.stderr, 'yellow')))
else:
text.append('stderr:\n{0}'.format(self.stderr))
        if self.retcode != 0:
if color:
text.append('return code: {0}'.format(colored(self.retcode, 'red')))
text.append('attempts: {0}'.format(colored(self.attempts, 'red')))
else:
text.append('return code: {0}'.format(self.retcode))
text.append('attempts: {0}'.format(self.attempts))
        elif self.attempts != 1:
if color:
text.append('attempts: {0}'.format(colored(self.attempts,
'yellow')))
else:
text.append('attempts: {0}'.format(self.attempts))
return '\n'.join(text)
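

# --- Usage sketch (illustrative only) ---
# A minimal sketch of driving a small cluster with the classes above; the
# host file, user and commands are placeholders.
#
#     hosts = parse_file('hosts.txt')           # one hostname per line
#     ctrl = Controller(hosts, user='admin')
#     cmds = ctrl.remote_command(['uname', '-a'])
#     ctrl.remote_push(['conf/?HOST.cfg'], '/tmp/node.cfg')
#     if not all_success(cmds):
#         sys.exit(1)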
|
|
# Author: Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator, _check_estimator
from ..parallel import parallel_func
class SlidingEstimator(BaseEstimator, TransformerMixin):
"""Search Light.
Fit, predict and score a series of models to each subset of the dataset
along the last dimension. Each entry in the last dimension is referred
to as a task.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable, string, defaults to None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
``estimators_`` : array-like, shape (n_tasks,)
List of fitted scikit-learn estimators (one per task).
"""
def __init__(self, base_estimator, scoring=None, n_jobs=1): # noqa: D102
_check_estimator(base_estimator)
self.base_estimator = base_estimator
self.n_jobs = n_jobs
self.scoring = scoring
if not isinstance(self.n_jobs, int):
raise ValueError('n_jobs must be int, got %s' % n_jobs)
def __repr__(self): # noqa: D105
repr_str = '<' + super(SlidingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators' % len(self.estimators_)
return repr_str + '>'
def fit(self, X, y):
"""Fit a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
self : object
Return self.
"""
self._check_Xy(X, y)
self.estimators_ = list()
# For fitting, the parallelization is across estimators.
parallel, p_func, n_jobs = parallel_func(_sl_fit, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
estimators = parallel(
p_func(self.base_estimator, split, y)
for split in np.array_split(X, n_jobs, axis=-1))
self.estimators_ = np.concatenate(estimators, 0)
return self
def fit_transform(self, X, y):
"""Fit and transform a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each task, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self.fit(X, y).transform(X)
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_sl_transform, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
y_pred = parallel(p_func(est, x, method)
for (est, x) in zip(est_splits, X_splits))
y_pred = np.concatenate(y_pred, axis=1)
return y_pred
def transform(self, X):
"""Transform each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice/task, the corresponding
estimator makes a transformation of the data, e.g.
``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
Xt : array, shape (n_samples, n_estimators)
The transformed values generated by each estimator.
""" # noqa: E501
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample predictions, e.g.:
``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets)
Predicted values for each estimator/data slice.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Predict each data slice with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample probabilistic predictions, e.g.:
``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
y_pred : array, shape (n_samples, n_tasks, n_classes)
Predicted probabilities for each estimator/data slice/task.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to the hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
outputs the distance to the hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
Predicted distances for each estimator/data slice.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def score(self, X, y):
"""Score each estimator on each task.
The number of tasks in X should match the number of tasks/estimators
given at fit time, i.e. we need
``X.shape[-1] == len(self.estimators_)``.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_tasks,)
Score for each estimator/task.
""" # noqa: E501
from sklearn.metrics.scorer import check_scoring
self._check_Xy(X)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
score = parallel(p_func(est, scoring, x, y)
for (est, x) in zip(est_splits, X_splits))
score = np.concatenate(score, axis=0)
return score
def _sl_fit(estimator, X, y):
"""Aux. function to fit SlidingEstimator in parallel.
Fit a clone estimator to each slice of data.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_sample, )
The target values.
Returns
-------
estimators_ : list of estimators
The fitted estimators.
"""
from sklearn.base import clone
estimators_ = list()
for ii in range(X.shape[-1]):
est = clone(estimator)
est.fit(X[..., ii], y)
estimators_.append(est)
return estimators_
def _sl_transform(estimators, X, method):
"""Aux. function to transform SlidingEstimator in parallel.
Applies transform/predict/decision_function etc for each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
method : str
The estimator method to use (e.g. 'predict', 'transform').
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
The transformations for each slice of data.
""" # noqa: E501
for ii, est in enumerate(estimators):
transform = getattr(est, method)
_y_pred = transform(X[..., ii])
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _sl_init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
return y_pred
def _sl_init_pred(y_pred, X):
"""Aux. function to SlidingEstimator to initialize y_pred."""
n_sample, n_tasks = X.shape[0], X.shape[-1]
y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype)
return y_pred
def _sl_score(estimators, scoring, X, y):
"""Aux. function to score SlidingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list, shape (n_tasks,)
The fitted estimators.
X : array, shape (n_samples, nd_features, n_tasks)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_tasks,)
The score for each task / slice of data.
"""
n_tasks = X.shape[-1]
score = np.zeros(n_tasks)
for ii, est in enumerate(estimators):
score[ii] = scoring(est, X[..., ii], y)
return score
def _check_method(estimator, method):
"""Check that an estimator has the method attribute.
If method == 'transform' and estimator does not have 'transform', use
'predict' instead.
"""
if method == 'transform' and not hasattr(estimator, 'transform'):
method = 'predict'
if not hasattr(estimator, method):
        raise ValueError('base_estimator does not have `%s` method.' % method)
return method
class GeneralizingEstimator(SlidingEstimator):
"""Generalization Light.
Fit a search-light along the last dimension and use them to apply a
systematic cross-tasks generalization.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable | string | None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
"""
    def __repr__(self):  # noqa: D105
        # SlidingEstimator.__repr__ already appends the number of fitted
        # estimators, so reuse it as-is to avoid duplicating that suffix.
        return super(GeneralizingEstimator, self).__repr__()
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
parallel, p_func, n_jobs = parallel_func(_gl_transform, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
y_pred = parallel(
p_func(self.estimators_, x_split, method)
for x_split in np.array_split(X, n_jobs, axis=-1))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def transform(self, X):
"""Transform each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For estimator the corresponding data slice is
used to make a transformation. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
Xt : array, shape (n_samples, n_estimators, n_slices)
The transformed values generated by each estimator.
"""
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts each slice of the data independently. The feature
dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Estimate probabilistic estimates of each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts a slice of the data. The feature dimension can be
multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a `predict_proba` method.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to all hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. Each estimator outputs the distance to
its hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def score(self, X, y):
"""Score each of the estimators on the tested dimensions.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_estimators, n_slices)
Score for each estimator / data slice couple.
""" # noqa: E501
from sklearn.metrics.scorer import check_scoring
self._check_Xy(X)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_gl_score, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
score = parallel(p_func(self.estimators_, scoring, x, y)
for x in X_splits)
score = np.concatenate(score, axis=1)
return score
def _gl_transform(estimators, X, method):
"""Transform the dataset.
This will apply each estimator to all slices of the data.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be multidimensional
e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
Xt : array, shape (n_samples, n_slices)
The transformed values generated by each estimator.
"""
n_sample, n_iter = X.shape[0], X.shape[-1]
    # stack generalized data for faster prediction; the stacking does not
    # depend on the estimator, so it is done once outside the loop
    X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)])
    X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]])
    for ii, est in enumerate(estimators):
        transform = getattr(est, method)
        _y_pred = transform(X_stack)
# unstack generalizations
if _y_pred.ndim == 2:
_y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]])
else:
shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int)
_y_pred = np.reshape(_y_pred, shape)
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _gl_init_pred(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
return y_pred
def _gl_init_pred(y_pred, X, n_train):
"""Aux. function to GeneralizingEstimator to initialize y_pred."""
n_sample, n_iter = X.shape[0], X.shape[-1]
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]),
y_pred.dtype)
else:
y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype)
return y_pred
def _gl_score(estimators, scoring, X, y):
"""Score GeneralizingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
X : array, shape (n_samples, nd_features, n_slices)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_estimators, n_slices)
The score for each slice of data.
"""
    # FIXME: This level of parallelization may be a bit high and memory
    # consuming. Perhaps lower it to the loop across X slices.
score_shape = [len(estimators), X.shape[-1]]
for ii, est in enumerate(estimators):
for jj in range(X.shape[-1]):
_score = scoring(est, X[..., jj], y)
# Initialize array of predictions on the first score iteration
if (ii == 0) & (jj == 0):
dtype = type(_score)
score = np.zeros(score_shape, dtype)
score[ii, jj, ...] = _score
return score
def _fix_auc(scoring, y):
from sklearn.preprocessing import LabelEncoder
# This fixes sklearn's inability to compute roc_auc when y not in [0, 1]
# scikit-learn/scikit-learn#6874
if scoring is not None:
if (
hasattr(scoring, '_score_func') and
hasattr(scoring._score_func, '__name__') and
scoring._score_func.__name__ == 'roc_auc_score'
):
if np.ndim(y) != 1 or len(set(y)) != 2:
raise ValueError('roc_auc scoring can only be computed for '
'two-class problems.')
y = LabelEncoder().fit_transform(y)
return y
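

# --- Usage sketch (illustrative only) ---
# A minimal sketch of both estimators on random data; the base estimator and
# the data shapes are arbitrary examples.
#
#     import numpy as np
#     from sklearn.linear_model import LogisticRegression
#
#     X = np.random.randn(100, 32, 50)    # (n_samples, n_features, n_tasks)
#     y = np.random.randint(0, 2, 100)
#     sl = SlidingEstimator(LogisticRegression(), scoring='roc_auc')
#     scores = sl.fit(X, y).score(X, y)         # shape (n_tasks,)
#     gl = GeneralizingEstimator(LogisticRegression())
#     gen_scores = gl.fit(X, y).score(X, y)     # shape (n_tasks, n_tasks)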
|
|
"""
- /*
- * Copyright 2008 Google Inc.
- * Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
- * Copyright (C) 2010 Rich Newpol <rich.newpol@gmail.com>
- *
- * Licensed under the Apache License, Version 2.0 (the "License") you may not
- * use this file except in compliance with the License. You may obtain a copy
- * of the License at
- *
- * http:#www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
"""
from __pyjamas__ import console
from pyjamas import DOM
from pyjamas import Window
from pyjamas import DeferredCommand
from pyjamas.EventController import EventGenerator
from pyjamas.ui import GlassWidget
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.MouseListener import MouseHandler, fireMouseEvent
from pyjamas.ui import Event
class SplitPanelSplitter(SimplePanel, MouseHandler):
""" a splitter is just a SimplePanel which can receive mouse events """
def __init__(self, splitPanel, **kwargs):
# keep a ref to our parent panel for event callback
self._splitpanel = splitPanel
SimplePanel.__init__(self, **kwargs)
MouseHandler.__init__(self)
self.addMouseListener(self)
# set some constant styles
elem = self.getElement()
# the following allows splitter to be small enough in IE
DOM.setStyleAttribute(elem, "overflow", "hidden")
def onMouseDown(self, sender, x, y):
""" catch a mouse down for parent """
ev = DOM.eventGetCurrentEvent()
# ignore right-button downs
if DOM.eventGetButton(ev) != Event.BUTTON_LEFT:
return
DOM.eventPreventDefault(DOM.eventGetCurrentEvent())
# parent will capture the mouse and handle the dragging from here
self._splitpanel.startSplitterDrag(x, y)
class SplitPanel(AbsolutePanel, MouseHandler, EventGenerator):
""" Provides the SplitPanel baseclass functionality
A SplitPanel is an AbsolutePanel containing an HTMLTable
with three cells. The first cell holds the first ScrollPanel,
while the center cell holds a Splitter, and the last cell
holds the other ScrollPanel.
"""
def __init__(self, vertical=False, **kwargs):
# set defaults
        if 'StyleName' not in kwargs:
if vertical: # vertical split panel
kwargs['StyleName'] = "gwt-VerticalSplitPanel"
else:
kwargs['StyleName'] = "gwt-HorizontalSplitPanel"
# splitter drag state vars
self._drag_start = None
self._pos = "50%"
# orientation
self._vertical = vertical
# now init the bases
AbsolutePanel.__init__(self, **kwargs)
MouseHandler.__init__(self)
# add our event support?
self.addListenedEvent("Resize")
# create the top/left widget container
self._container1 = ScrollPanel()
# create the bottom/right widget container
self._container2 = ScrollPanel()
# create the splitter
self._splitter = SplitPanelSplitter(self)
# add splitter handling
self._splitter.addMouseListener(self)
# add mouse event handling
self.addMouseListener(self)
# add the parts
AbsolutePanel.add(self, self._container1, 0, 0)
AbsolutePanel.add(self, self._splitter, 0, 0)
AbsolutePanel.add(self, self._container2, 0, 0)
# set the layout
if vertical: # vertical split panel
self._splitter.setStyleName("vsplitter")
self._splitter.setWidth("100%")
self._container1.setWidth("100%")
self._container2.setWidth("100%")
# set drag cursor
DOM.setStyleAttribute(self._splitter.getElement(),
"cursor", "n-resize")
else: # horizontal split panel
self._splitter.setStyleName("hsplitter")
self._splitter.setHeight("100%")
self._container1.setHeight("100%")
self._container2.setHeight("100%")
# set drag cursor
DOM.setStyleAttribute(self._splitter.getElement(),
"cursor", "e-resize")
def onAttach(self):
AbsolutePanel.onAttach(self)
self.setSplitPosition()
# fixup the container 2 size and position
def _finalizePositions(self, pos=None):
finalized = False
if self._vertical:
if pos is None:
pos = self._container1.getOffsetHeight()
space = self.getOffsetHeight()
sz = self._splitter.getOffsetHeight()
if space > 0 and sz > 0 and pos > 0:
# limit pos
if pos > space - sz:
pos = space - sz
self._container1.setHeight(pos)
self.setWidgetPosition(self._splitter, 0, pos)
self.setWidgetPosition(self._container2, 0, pos + sz)
self._container2.setHeight(space - (pos + sz))
finalized = True
else:
if pos is None:
pos = self._container1.getOffsetWidth()
space = self.getOffsetWidth()
sz = self._splitter.getOffsetWidth()
if space > 0 and sz > 0 and pos > 0:
# limit pos
if pos > space - sz:
pos = space - sz
self._container1.setWidth(pos)
self.setWidgetPosition(self._splitter, pos, 0)
self.setWidgetPosition(self._container2, pos + sz, 0)
self._container2.setWidth(space - (pos + sz))
finalized = True
if finalized:
self.dispatchResizeEvent(self, pos)
return finalized
# end a drag operation
def _stopDragging(self):
if self._drag_start is not None:
# we are no longer dragging
self._drag_start = None
# deactivate the transparent overlay
GlassWidget.hide()
# don't let a mouse-up become a click event
DOM.eventCancelBubble(DOM.eventGetCurrentEvent(), True)
def _isDragging(self):
return self._drag_start is not None
# start a drag operation (called by splitter)
def startSplitterDrag(self, x, y):
if self._drag_start is None:
# remember where on the slider we are dragging
if self._vertical:
self._drag_start = y
else:
self._drag_start = x
# activate the transparent overlay to keep mouse events flowing to
# the splitter (and to us) even if the mouse leaves the splitter
GlassWidget.show(self)
# add handlers for mouse events to support dragging the slider
    # NOTE: the x,y position is relative to the splitter
def onMouseMove(self, sender, x, y):
# if dragging, then use current mouse position
# to reset splitter position
if not self._isDragging():
return
# remove the offset into the splitter
# where we started dragging
if self._vertical:
self._pos = y - self._drag_start
else:
self._pos = x - self._drag_start
# apply limit
if self._pos < 1:
self._pos = 1
# apply new position
self.setSplitPosition()
def onMouseUp(self, sender, x, y):
ev = DOM.eventGetCurrentEvent()
# ignore right-button ups
if DOM.eventGetButton(ev) != Event.BUTTON_LEFT:
return
DOM.eventPreventDefault(ev)
# if we are dragging
if self._isDragging():
# stop dragging on mouse up
self._stopDragging()
# called when we start dragging
def onMouseGlassEnter(self, sender):
pass
# called when we drag out of the window
# (NOT called when we just stop dragging)
def onMouseGlassLeave(self, sender):
# we left the window, so stop dragging
self._stopDragging()
#
# Start the inherited 'public' API
#
# specify splitter position in pix OR percentage
# if pixels (number) specified, we can make change now
# otherwise, we have to set the offset as specified, then
# 'fixup' the remaining space after rendering
def setSplitPosition(self, pos=None):
if pos is not None:
# remember last pos set
self._pos = pos
else:
pos = self._pos
if pos < 1:
pos = 1
self._pos = pos
# change adjustable dimension
if self._vertical:
self._container1.setHeight(pos)
else:
self._container1.setWidth(pos)
# if pix are given, we can try to finalize the positions
finalized = False
if isinstance(pos, int):
finalized = self._finalizePositions(pos)
# if needed, queue callback to finalize
if not finalized:
DeferredCommand.add(self._finalizePositions)
def getWidget(self, index):
if index == 0:
return self._container1.getWidget()
return self._container2.getWidget()
def setWidget(self, index, widget):
if index == 0:
return self._container1.setWidget(widget)
return self._container2.setWidget(widget)
# Adds a widget to a pane
def add(self, widget):
        if self.getWidget(0) is None:
            self.setWidget(0, widget)
        elif self.getWidget(1) is None:
            self.setWidget(1, widget)
        else:
            console.error("SplitPanel can only contain two child widgets")
# Removes a child widget.
def remove(self, widget):
if self.getWidget(0) == widget:
self._container1.remove(widget)
elif self.getWidget(1) == widget:
self._container2.remove(widget)
else:
AbsolutePanel.remove(self, widget)
# Gets the content element for the given index.
def getElement(self, index=None):
if index is None:
return AbsolutePanel.getElement(self)
return self.getWidget(index).getElement()
# Gets the widget in the pane at end of the line direction for the layout
def getEndOfLineWidget(self):
return self.getWidget(1)
# Gets the element that is acting as the splitter.
def getSplitElement(self):
return self._splitter.getElement()
# Gets the widget in the pane at the start of line direction for the layout
def getStartOfLineWidget(self):
return self.getWidget(0)
# Indicates whether the split panel is being resized.
def isResizing(self):
return False
# Sets the widget in the pane at the end of line direction for the layout
def setEndOfLineWidget(self, widget):
self.setWidget(1, widget)
def setStartOfLineWidget(self, widget):
self.setWidget(0, widget)
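# A minimal usage sketch (assumes a running pyjamas page; the HTML widgets
# and the sizing keyword arguments below are illustrative):
#
#   from pyjamas.ui.RootPanel import RootPanel
#   from pyjamas.ui.HTML import HTML
#
#   split = SplitPanel(vertical=False, Width="100%", Height="100%")
#   split.add(HTML("left pane"))       # fills container 0
#   split.add(HTML("right pane"))      # fills container 1
#   split.setSplitPosition("30%")      # pixels (int) or percentage (str)
#   RootPanel().add(split)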
|
|
import pickle
from collections import OrderedDict, deque
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from ...feature_detectors.cluster import hart85_means_shift_cluster
from ...feature_detectors.steady_states import find_steady_states_transients
from . import Disaggregator
class MyDeque(deque):
def popmiddle(self, pos):
self.rotate(-pos)
ret = self.popleft()
self.rotate(pos)
return ret
class PairBuffer(object):
"""
Attributes:
* transitionList (list of tuples)
* matchedPairs (dataframe containing matched pairs of transitions)
"""
def __init__(self, columns, buffer_size, min_tolerance, percent_tolerance,
large_transition, num_measurements):
"""
Parameters
----------
buffer_size: int, optional
size of the buffer to use for finding edges
min_tolerance: int, optional
variance in power draw allowed for pairing a match
percent_tolerance: float, optional
if transition is greater than large_transition, then use percent of large_transition
large_transition: float, optional
power draw of a Large transition
num_measurements: int, optional
2 if only active power
3 if both active and reactive power
"""
# We use a deque here, because it allows us quick access to start and end popping
# and additionally, we can set a maxlen which drops oldest items. This nicely
        # suits Hart's recommendation that the size should be tunable.
self._buffer_size = buffer_size
self._min_tol = min_tolerance
self._percent_tol = percent_tolerance
self._large_transition = large_transition
self.transition_list = MyDeque([], maxlen=self._buffer_size)
self._num_measurements = num_measurements
if self._num_measurements == 3:
# Both active and reactive power is available
self.pair_columns = ['T1 Time', 'T1 Active', 'T1 Reactive',
'T2 Time', 'T2 Active', 'T2 Reactive']
elif self._num_measurements == 2:
# Only active power is available
if columns[0][1] == 'active':
self.pair_columns = ['T1 Time', 'T1 Active',
'T2 Time', 'T2 Active']
elif columns[0][1] == 'apparent':
self.pair_columns = ['T1 Time', 'T1 Apparent',
'T2 Time', 'T2 Apparent']
self.matched_pairs = pd.DataFrame(columns=self.pair_columns)
def clean_buffer(self):
        # Remove any matched transitions
for idx, entry in enumerate(self.transition_list):
if entry[self._num_measurements]:
self.transition_list.popmiddle(idx)
self.clean_buffer()
break
        # Remove oldest transition if buffer cleaning didn't remove anything
# if len(self.transitionList) == self._bufferSize:
# self.transitionList.popleft()
def add_transition(self, transition):
# Check transition is as expected.
assert isinstance(transition, (tuple, list))
# Check that we have both active and reactive powers.
assert len(transition) == self._num_measurements
# Convert as appropriate
if isinstance(transition, tuple):
mtransition = list(transition)
# Add transition to List of transitions (set marker as unpaired)
mtransition.append(False)
self.transition_list.append(mtransition)
# checking for pairs
# self.pairTransitions()
# self.cleanBuffer()
def pair_transitions(self):
"""
Hart 85, P 33.
        The algorithm must not allow an ON transition to match an OFF which occurred at the end
of a different cycle, so that only ON/OFF pairs which truly belong
together are paired up. Otherwise the energy consumption of the
appliance will be greatly overestimated.
Hart 85, P 32.
For the two-state load monitor, a pair is defined as two entries
which meet the following four conditions:
(1) They are on the same leg, or are both 240 V,
(2) They are both unmarked,
(3) The earlier has a positive real power component, and
(4) When added together, they result in a vector in which the
absolute value of the real power component is less than 35
Watts (or 3.5% of the real power, if the transitions are
over 1000 W) and the absolute value of the reactive power
component is less than 35 VAR (or 3.5%).
"""
tlength = len(self.transition_list)
pairmatched = False
if tlength < 2:
return pairmatched
# Can we reduce the running time of this algorithm?
# My gut feeling is no, because we can't re-order the list...
# I wonder if we sort but then check the time... maybe. TO DO
# (perhaps!).
new_matched_pairs = []
# Start the element distance at 1, go up to current length of buffer
for eDistance in range(1, tlength):
idx = 0
while idx < tlength - 1:
# We don't want to go beyond length of array
compindex = idx + eDistance
if compindex < tlength:
val = self.transition_list[idx]
# val[1] is the active power and
# val[self._num_measurements] is match status
if (val[1] > 0) and (val[self._num_measurements] is False):
compval = self.transition_list[compindex]
if compval[self._num_measurements] is False:
# Add the two elements for comparison
vsum = np.add(
val[1:self._num_measurements],
compval[1:self._num_measurements])
# Set the allowable tolerance for reactive and
# active
matchtols = [self._min_tol, self._min_tol]
for ix in range(1, self._num_measurements):
matchtols[ix - 1] = (
self._min_tol
if (max(np.fabs([val[ix], compval[ix]])) < self._large_transition)
else (self._percent_tol * max(np.fabs([val[ix], compval[ix]])))
)
if self._num_measurements == 3:
condition = (
np.fabs(
vsum[0]) < matchtols[0]) and (
np.fabs(
vsum[1]) < matchtols[1])
elif self._num_measurements == 2:
condition = np.fabs(vsum[0]) < matchtols[0]
if condition:
# Mark the transition as complete
self.transition_list[idx][self._num_measurements] = True
self.transition_list[compindex][self._num_measurements] = True
pairmatched = True
# Append the OFF transition to the ON. Add to
# the list.
matchedpair = val[0:self._num_measurements] + \
compval[0:self._num_measurements]
new_matched_pairs.append(matchedpair)
# Iterate Index
idx += 1
else:
break
# Process new pairs in a single operation (faster than growing the
# dataframe)
if pairmatched:
if self.matched_pairs.empty:
self.matched_pairs = pd.DataFrame(
new_matched_pairs, columns=self.pair_columns)
else:
self.matched_pairs = self.matched_pairs.append(
pd.DataFrame(new_matched_pairs, columns=self.pair_columns))
return pairmatched
class Hart85(Disaggregator):
"""1 or 2 dimensional Hart 1985 algorithm.
Attributes
----------
model : dict
Each key is either the instance integer for an ElecMeter,
or a tuple of instances for a MeterGroup.
Each value is a sorted list of power in different states.
"""
def __init__(self):
self.model = {}
self.MODEL_NAME = "Hart85"
def train(
self,
metergroup,
columns=[
('power',
'active')],
buffer_size=20,
noise_level=70,
state_threshold=15,
min_tolerance=100,
percent_tolerance=0.035,
large_transition=1000,
**kwargs):
"""
Train using Hart85. Places the learnt model in `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
columns: nilmtk.Measurement, should be one of the following
[('power','active')]
[('power','apparent')]
[('power','reactive')]
[('power','active'), ('power', 'reactive')]
buffer_size: int, optional
size of the buffer to use for finding edges
min_tolerance: int, optional
variance in power draw allowed for pairing a match
percent_tolerance: float, optional
if transition is greater than large_transition,
then use percent of large_transition
large_transition: float, optional
power draw of a Large transition
"""
self.columns = columns
self.state_threshold = state_threshold
self.noise_level = noise_level
[self.steady_states, self.transients] = find_steady_states_transients(
metergroup, columns, noise_level, state_threshold, **kwargs)
self.pair_df = self.pair(
buffer_size, min_tolerance, percent_tolerance, large_transition)
self.centroids = hart85_means_shift_cluster(self.pair_df, columns)
self.model = dict(
columns=columns,
state_threshold=state_threshold,
noise_level=noise_level,
steady_states=self.steady_states,
transients=self.transients,
# pair_df=self.pair_df,
centroids=self.centroids
)
def pair(self, buffer_size, min_tolerance, percent_tolerance,
large_transition):
subset = list(self.transients.itertuples())
buffer = PairBuffer(
columns=self.columns,
min_tolerance=min_tolerance,
buffer_size=buffer_size,
percent_tolerance=percent_tolerance,
large_transition=large_transition,
num_measurements=len(
self.transients.columns) + 1)
for s in subset:
# if len(buffer.transitionList) < bsize
if len(buffer.transition_list) == buffer_size:
buffer.clean_buffer()
buffer.add_transition(s)
buffer.pair_transitions()
return buffer.matched_pairs
def disaggregate_chunk(self, chunk, prev, transients):
"""
Parameters
----------
chunk : pd.DataFrame
mains power
prev
transients : returned by find_steady_state_transients
Returns
-------
states : pd.DataFrame
with same index as `chunk`.
"""
states = pd.DataFrame(
-1, index=chunk.index, columns=self.centroids.index.values)
for transient_tuple in transients.itertuples():
if transient_tuple[0] < chunk.index[0]:
# Transient occurs before chunk has started; do nothing
pass
elif transient_tuple[0] > chunk.index[-1]:
# Transient occurs after chunk has ended; do nothing
pass
else:
# Absolute value of transient
abs_value = np.abs(transient_tuple[1:])
positive = transient_tuple[1] > 0
abs_value_transient_minus_centroid = pd.DataFrame(
(self.centroids - abs_value).abs())
if len(transient_tuple) == 2:
# 1d data
index_least_delta = (
abs_value_transient_minus_centroid.idxmin().values[0])
else:
# 2d data.
# Need to find absolute value before computing minimum
columns = abs_value_transient_minus_centroid.columns
abs_value_transient_minus_centroid["multidim"] = (
abs_value_transient_minus_centroid[columns[0]] ** 2
+
abs_value_transient_minus_centroid[columns[1]] ** 2)
index_least_delta = (
abs_value_transient_minus_centroid["multidim"].idxmin())
if positive:
# Turned on
states.loc[transient_tuple[0]][index_least_delta] = 1
else:
# Turned off
states.loc[transient_tuple[0]][index_least_delta] = 0
prev = states.iloc[-1].to_dict()
power_chunk_dict = self.assign_power_from_states(states, prev)
self.power_dict = power_chunk_dict
self.chunk_index = chunk.index
# Check whether 1d data or 2d data and converting dict to dataframe
if len(transient_tuple) == 2:
temp_df = pd.DataFrame(power_chunk_dict, index=chunk.index)
return temp_df, 2
else:
tuples = []
for i in range(len(self.centroids.index.values)):
for j in range(0, 2):
tuples.append([i, j])
columns = pd.MultiIndex.from_tuples(tuples)
temp_df = pd.DataFrame(
power_chunk_dict,
index=chunk.index,
columns=columns)
for i in range(len(chunk.index)):
for j in range(len(self.centroids.index.values)):
for k in range(0, 2):
temp_df.iloc[i][j][k] = power_chunk_dict[j][i][k]
return temp_df, 3
def assign_power_from_states(self, states_chunk, prev):
di = {}
ndim = len(self.centroids.columns)
for appliance in states_chunk.columns:
values = states_chunk[[appliance]].values.flatten()
if ndim == 1:
power = np.zeros(len(values), dtype=int)
else:
power = np.zeros((len(values), 2), dtype=int)
# on = False
i = 0
while i < len(values) - 1:
if values[i] == 1:
# print("A", values[i], i)
on = True
i = i + 1
power[i] = self.centroids.loc[appliance].values
while values[i] != 0 and i < len(values) - 1:
# print("B", values[i], i)
power[i] = self.centroids.loc[appliance].values
i = i + 1
elif values[i] == 0:
# print("C", values[i], i)
on = False
i = i + 1
power[i] = 0
while values[i] != 1 and i < len(values) - 1:
# print("D", values[i], i)
if ndim == 1:
power[i] = 0
else:
power[i] = [0, 0]
i = i + 1
else:
# print("E", values[i], i)
# Unknown state. If previously we know about this
# appliance's state, we can
# use that. Else, it defaults to 0
if prev[appliance] == -1 or prev[appliance] == 0:
# print("F", values[i], i)
on = False
power[i] = 0
while values[i] != 1 and i < len(values) - 1:
# print("G", values[i], i)
if ndim == 1:
power[i] = 0
else:
power[i] = [0, 0]
i = i + 1
else:
# print("H", values[i], i)
on = True
power[i] = self.centroids.loc[appliance].values
while values[i] != 0 and i < len(values) - 1:
# print("I", values[i], i)
power[i] = self.centroids.loc[appliance].values
i = i + 1
di[appliance] = power
# print(power.sum())
return di
def disaggregate(self, mains, output_datastore, **load_kwargs):
"""Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
"""
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
[_, transients] = find_steady_states_transients(
mains, columns=self.columns, state_threshold=self.state_threshold,
noise_level=self.noise_level, **load_kwargs)
# For now ignoring the first transient
# transients = transients[1:]
# Initially all appliances/meters are in unknown state (denoted by -1)
prev = OrderedDict()
learnt_meters = self.centroids.index.values
for meter in learnt_meters:
prev[meter] = -1
timeframes = []
# Now iterating over mains data and disaggregating chunk by chunk
if len(self.columns) == 1:
ac_type = self.columns[0][1]
else:
ac_type = ['active', 'reactive']
for chunk in mains.power_series(**load_kwargs):
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
power_df, dimen = self.disaggregate_chunk(
chunk, prev, transients)
if dimen == 2:
columns = pd.MultiIndex.from_tuples([chunk.name])
else:
tuples = list(self.columns)
columns = pd.MultiIndex.from_tuples(tuples)
for meter in learnt_meters:
data_is_available = True
df = power_df[[meter]]
df.columns = columns
df.columns.names = ['physical_quantity', 'type']
key = '{}/elec/meter{:d}'.format(building_path, meter + 2)
val = df.apply(pd.to_numeric).astype('float32')
output_datastore.append(key, value=val)
print('Next Chunk..')
print('Appending mains data to datastore')
for chunk_mains in mains.load(ac_type=ac_type):
chunk_df = chunk_mains
chunk_df = chunk_df.apply(pd.to_numeric).astype('float32')
print('Done')
output_datastore.append(key=mains_data_location,
value=chunk_df)
# save metadata
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
supervised=False,
num_meters=len(self.centroids)
)
return power_df
def export_model(self, filename):
example_dict = self.model
with open(filename, "wb") as pickle_out:
pickle.dump(example_dict, pickle_out)
def import_model(self, filename):
with open(filename, "rb") as pickle_in:
self.model = pickle.load(pickle_in)
self.columns = self.model['columns']
self.state_threshold = self.model['state_threshold']
self.noise_level = self.model['noise_level']
self.steady_states = self.model['steady_states']
self.transients = self.model['transients']
# pair_df=self.pair_df,
self.centroids = self.model['centroids']
def best_matched_appliance(self, submeters, pred_df):
"""
Parameters
----------
submeters : elec.submeters object
pred_df : predicted dataframe returned by disaggregate()
Returns
-------
list : containing best matched pairs to disaggregated output
"""
rms_error = {}
submeters_df = submeters.dataframe_of_meters()
new_df = pd.merge(
pred_df,
submeters_df,
left_index=True,
right_index=True)
rmse_all = []
for pred_appliance in pred_df.columns:
rmse = {}
for appliance in submeters_df.columns:
temp_value = (
np.sqrt(
mean_squared_error(
new_df[pred_appliance],
new_df[appliance])))
rmse[appliance] = temp_value
rmse_all.append(rmse)
        match = []
        for i in range(len(rmse_all)):
            key_min = min(rmse_all[i].keys(), key=(lambda k: rmse_all[i][k]))
            print('Best Matched Pair is', (i, key_min))
            match.append((pred_df.columns[i], key_min))
        return match
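# A minimal train/disaggregate sketch (assumes an NILMTK dataset on disk;
# the file names are illustrative):
#
#   from nilmtk import DataSet, HDFDataStore
#
#   ds = DataSet('redd.h5')
#   elec = ds.buildings[1].elec
#   h = Hart85()
#   h.train(elec.mains(), columns=[('power', 'active')])
#   output = HDFDataStore('hart85-disag.h5', 'w')
#   h.disaggregate(elec.mains(), output)
#   output.close()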
|
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ungettext, ugettext as _
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
"""
``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via JavaScript, you should
increment the count field of this form as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
# MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
# the management form, but only for the convenience of client-side
# code. The POST value of them returned from the client is not checked.
self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super(ManagementForm, self).__init__(*args, **kwargs)
@python_2_unicode_compatible
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data or {}
self.files = files or {}
self.initial = initial
self.error_class = error_class
self._errors = None
self._non_form_errors = None
def __str__(self):
return self.as_table()
def __iter__(self):
"""Yields the forms in the order they should be rendered"""
return iter(self.forms)
def __getitem__(self, index):
"""Returns the form at the given index, based on the rendering order"""
return self.forms[index]
def __len__(self):
return len(self.forms)
def __bool__(self):
"""All formsets have a management form which is not included in the length"""
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
@property
def management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError(
_('ManagementForm data is missing or has been tampered with'),
code='missing_management_form',
)
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MIN_NUM_FORM_COUNT: self.min_num,
MAX_NUM_FORM_COUNT: self.max_num
})
return form
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.is_bound:
# return absolute_max if it is lower than the actual total form
# count in the data; this is DoS protection to prevent clients
# from forcing the server to instantiate arbitrary numbers of
# forms
return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
else:
initial_forms = self.initial_form_count()
total_forms = initial_forms + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the initial data if it's there, 0 otherwise.
initial_forms = len(self.initial) if self.initial else 0
return initial_forms
@cached_property
def forms(self):
"""
Instantiate forms at first property access.
"""
# DoS protection is included in total_form_count()
forms = [self._construct_form(i) for i in xrange(self.total_form_count())]
return forms
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix(i),
'error_class': self.error_class,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial and 'initial' not in kwargs:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty.
if i >= self.initial_form_count():
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
@property
def initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
@property
def extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
@property
def empty_form(self):
form = self.form(
auto_id=self.auto_id,
prefix=self.add_prefix('__prefix__'),
empty_permitted=True,
)
self.add_fields(form, None)
return form
@property
def cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
"""
Returns a list of forms that have been marked for deletion.
"""
if not self.is_valid() or not self.can_delete:
return []
# construct _deleted_form_indexes which is just a list of form indexes
# that have had their deletion widget set to True
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
@property
def ordered_forms(self):
"""
        Returns a list of forms in the order specified by the incoming data.
Raises an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
# don't add data marked for deletion to self.ordered_data
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
# A sort function to order things numerically ascending, but
# None should be sorted below anything else. Allowing None as
# a comparison value makes it so we can leave ordering fields
# blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
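            # e.g. sort keys (0, 1) < (0, 3) < (1, 0): explicit orders 1 and 3
            # come first, and blank ordering fields (None) sort to the end.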
self._ordering.sort(key=compare_ordering_key)
# Return a list of form.cleaned_data dicts in the order specified by
# the form data.
return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
return 'form'
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is None:
self.full_clean()
return self._non_form_errors
@property
def errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
def total_error_count(self):
"""
Returns the number of errors across all forms in the formset.
"""
return len(self.non_form_errors()) +\
sum(len(form_errors) for form_errors in self.errors)
def _should_delete_form(self, form):
"""
Returns whether or not the form was marked for deletion.
"""
return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
"""
Returns True if every form in self.forms is valid.
"""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
# This triggers a full clean.
self.errors
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete:
if self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# should not cause the entire formset to be invalid.
continue
forms_valid &= form.is_valid()
return forms_valid and not self.non_form_errors()
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self._non_form_errors.
"""
self._errors = []
self._non_form_errors = self.error_class()
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
try:
if (self.validate_max and
self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
raise ValidationError(ungettext(
"Please submit %d or fewer forms.",
"Please submit %d or fewer forms.", self.max_num) % self.max_num,
code='too_many_forms',
)
if (self.validate_min and
self.total_form_count() - len(self.deleted_forms) < self.min_num):
raise ValidationError(ungettext(
"Please submit %d or more forms.",
"Please submit %d or more forms.", self.min_num) % self.min_num,
code='too_few_forms')
# Give self.clean() a chance to do cross-form validation.
self.clean()
except ValidationError as e:
self._non_form_errors = self.error_class(e.error_list)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accessible
via formset.non_form_errors()
"""
pass
def has_changed(self):
"""
Returns true if data in any form differs from initial.
"""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
if self.forms:
return self.forms[0].is_multipart()
else:
return self.empty_form.is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return self.empty_form.media
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join(form.as_table() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = ' '.join(form.as_p() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = ' '.join(form.as_ul() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False):
"""Return a FormSet for the given form class."""
if min_num is None:
min_num = DEFAULT_MIN_NUM
if max_num is None:
max_num = DEFAULT_MAX_NUM
# hard limit on forms instantiated, to prevent memory-exhaustion attacks
# limit is simply max_num + DEFAULT_MAX_NUM (which is 2*DEFAULT_MAX_NUM
# if max_num is None in the first place)
absolute_max = max_num + DEFAULT_MAX_NUM
extra += min_num
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'min_num': min_num, 'max_num': max_num,
'absolute_max': absolute_max, 'validate_min': validate_min,
'validate_max': validate_max}
return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
"""Returns true if every formset in formsets is valid."""
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
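# A minimal usage sketch (ArticleForm and its field are illustrative):
#
#   from django.forms import Form, CharField
#
#   class ArticleForm(Form):
#       title = CharField()
#
#   ArticleFormSet = formset_factory(ArticleForm, extra=2, can_delete=True)
#   data = {
#       'form-TOTAL_FORMS': '1',
#       'form-INITIAL_FORMS': '0',
#       'form-MIN_NUM_FORMS': '0',
#       'form-MAX_NUM_FORMS': '1000',
#       'form-0-title': 'Hello',
#   }
#   formset = ArticleFormSet(data)
#   assert formset.is_valid()
#   formset.cleaned_data   # -> [{'title': 'Hello', 'DELETE': False}]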
|
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (c) 2016, 2017 Taro Sato
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import inspect
import logging
import random
import string
import threading
import time
from StringIO import StringIO
import mock
import pytest
import redis
from pyrediq import PriorityQueue
from pyrediq import QueueEmpty
from pyrediq.priority_queue import _cleanup
from pyrediq.priority_queue import Message
from pyrediq.priority_queue import MessageConsumer
from pyrediq.priority_queue import orphan_consumer_cleaner
from pyrediq.priority_queue import Packed
log = logging.getLogger(__name__)
TEST_QUEUE_PREFIX = '__pyrediqtest_'
def random_chars(n=12):
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in xrange(n))
def generate_queue_name():
return TEST_QUEUE_PREFIX + random_chars()
@pytest.fixture
def queue():
mq = PriorityQueue(generate_queue_name(), redis.StrictRedis())
yield mq
mq.purge()
def spawn(func, *args, **kwargs):
th = threading.Thread(target=func, args=args, kwargs=kwargs)
th.start()
return th
def joinall(threads):
for th in threads:
# this should block
th.join()
def message_producer(redis_conn, queue_name, messages, sleep=None):
"""Simulates a function producing messages."""
    if inspect.isfunction(sleep):
        pass
    elif isinstance(sleep, (int, float)):
        # sleep for a random fraction of the given duration between puts
        max_delay = float(sleep)

        def sleep():
            time.sleep(random.random() * max_delay)
    else:
        def sleep():
            pass
queue = PriorityQueue(queue_name, redis_conn)
for msg in messages:
queue.put(**msg)
sleep()
def message_consumer(redis_conn, queue_name, message_count, timeout=None):
queue = PriorityQueue(queue_name, redis_conn)
with queue.consumer() as consumer:
for _ in xrange(message_count):
msg = consumer.get(block=True, timeout=timeout)
# trigger exception to simulate failure
bomb = msg.payload.get('bomb', False)
if bomb:
raise Exception('Bomb exploded')
# simulate some computation after getting the message
proc_time = msg.payload.get('processing_time', 0)
time.sleep(proc_time)
if msg.payload.get('reject'):
consumer.reject(msg)
else:
consumer.ack(msg)
def test_single_consumer(queue, caplog):
caplog.setLevel(logging.DEBUG, logger='pyrediq')
msgs = [{'payload': {'message': '{!r}'.format(i)},
'priority': random.randint(
PriorityQueue.MIN_PRIORITY, PriorityQueue.MAX_PRIORITY)}
for i in xrange(1)]
threads = []
threads.append(spawn(
message_consumer, queue._conn, queue.name, len(msgs)))
message_producer(queue._conn, queue.name, msgs)
joinall(threads)
for thread in threads:
assert thread.is_alive() is False
assert len(list(queue._get_consumer_ids())) == 0
assert queue.is_empty()
def test_multiple_consumers(queue, caplog):
caplog.setLevel(logging.WARNING, logger='redis_lock')
caplog.setLevel(logging.DEBUG, logger='pyrediq')
n_message = 10
msgs = [{'payload': {'processing_time': random.random()},
'priority': random.randint(
PriorityQueue.MIN_PRIORITY, PriorityQueue.MAX_PRIORITY)}
for i in xrange(n_message)]
threads = []
for _ in xrange(n_message):
threads.append(spawn(
message_consumer, queue._conn, queue.name, 1))
message_producer(queue._conn, queue.name, msgs)
joinall(threads)
for thread in threads:
assert thread.is_alive() is False
assert len(list(queue._get_consumer_ids())) == 0
assert queue.is_empty()
def test_consumer_fail(queue, caplog):
caplog.setLevel(logging.DEBUG, logger='pyrediq')
n_message = 10
msgs = [{'payload': {'bomb': True,
'processing_time': random.random()},
'priority': random.randint(
PriorityQueue.MIN_PRIORITY, PriorityQueue.MAX_PRIORITY)}
for i in xrange(n_message)]
threads = []
for _ in xrange(n_message):
threads.append(spawn(
message_consumer, queue._conn, queue.name, 1))
message_producer(queue._conn, queue.name, msgs)
joinall(threads)
for thread in threads:
assert thread.is_alive() is False
assert len(list(queue._get_consumer_ids())) == 0
assert queue.size() == n_message
assert not queue.is_empty()
def test_default_message_creation():
msg = Message()
log.debug('Check defaults')
assert isinstance(msg.id, str) and len(msg.id) == 32
assert msg.payload is None
assert msg.priority == 0
def test_message_creation():
with pytest.raises(AssertionError) as ei:
Message(priority='sfjei')
assert 'must be int within' in ei.value.message
expected = {'payload': {'test': 'value'}, 'priority': 2, '_id': '0' * 32}
msg = Message(**expected)
assert msg.id == expected['_id']
for field in ['payload', 'priority']:
assert getattr(msg, field) == expected[field]
def test_message_comparison():
msg = Message()
assert msg != Message()
assert msg == Packed.serialize(msg).deserialize()
def test_message_serialization():
msg = Message()
assert msg == Packed.serialize(msg).deserialize()
def test_serializer_hex_conversion():
f = StringIO(bytearray(range(248, 256) + range(0, 8)))
for x in xrange(-8, 8):
assert x == Packed._binary_to_priority(f.read(1))
def test_queue_construction():
queue = PriorityQueue(generate_queue_name())
assert isinstance(queue._conn, redis.StrictRedis)
redis_conn = redis.StrictRedis()
queue = PriorityQueue(generate_queue_name(), redis_conn=redis_conn)
assert queue._conn == redis_conn
# with pytest.raises(ValueError) as exc:
# queue = PriorityQueue(generate_queue_name(), redis_conn='dummy')
# assert 'is a StrictRedis instance' in exc.value.message
@pytest.mark.parametrize('queue_name', [
'thisisbad:bad'
])
def test_invalid_queue_name(queue_name):
with pytest.raises(ValueError):
PriorityQueue(queue_name)
def test_queue_purge():
queue = PriorityQueue(generate_queue_name())
queue.put('test')
consumer = MessageConsumer(queue)
assert consumer.id in list(queue._get_consumer_ids())
with pytest.raises(RuntimeError):
queue.purge()
consumer.get(block=True)
consumer._stop_beat()
queue.purge()
assert len(queue) == 0
def test_consumer_len(queue):
queue.put('1')
queue.put('2')
with queue.consumer() as consumer:
assert 0 == len(consumer)
msg1 = consumer.get()
assert 1 == len(consumer)
msg2 = consumer.get()
assert 2 == len(consumer)
consumer.ack(msg1)
assert 1 == len(consumer)
consumer.reject(msg2)
assert 0 == len(consumer)
def test_consumer_get_message_blocking(queue):
with mock.patch('redis.StrictRedis.blpop') as mocked:
def f(*args):
time.sleep(2)
raise Exception('bomb')
mocked.side_effect = f
with pytest.raises(Exception) as cm:
with queue.consumer() as consumer:
consumer.get(block=True, timeout=None)
assert 'bomb' in cm.value.message
def test_consumer_get_message_blocking_with_timeout(queue):
t0 = time.time()
timeout = 1
with pytest.raises(QueueEmpty):
with queue.consumer() as consumer:
consumer.get(block=True, timeout=timeout)
assert time.time() - t0 > timeout
def test_consumer_get_message_nonblocking(queue):
with pytest.raises(QueueEmpty):
with queue.consumer() as consumer:
consumer.get(block=False)
class MockMessage(object):
id = 'badid'
def test_consumer_ack_invalid(queue):
with pytest.raises(ValueError) as cm:
with queue.consumer() as consumer:
consumer.ack(MockMessage)
assert 'did not find' in cm.value.message
def test_consumer_reject_invalid(queue):
with pytest.raises(ValueError) as cm:
with queue.consumer() as consumer:
consumer.reject(MockMessage)
assert 'did not find' in cm.value.message
def test_consumer_consumed_message_validation(queue):
for _ in xrange(2):
queue.put(payload='test')
with queue.consumer() as consumer:
msg = consumer.get()
msg_pending = consumer.get()
consumer.ack(msg)
with pytest.raises(ValueError) as cm:
consumer.ack(msg)
assert 'did not find' in cm.value.message
with queue.consumer() as consumer:
msg = consumer.get()
assert msg == msg_pending
def test_consumer_requeue_critical_failure(queue):
queue.put('test')
with queue.consumer() as consumer:
msg = consumer.get()
with mock.patch('redis.StrictRedis.rpush') as m:
m.side_effect = Exception('failed')
with pytest.raises(Exception) as exc:
consumer.reject(msg, requeue=True)
assert 'failed' in exc.value.message
assert 1 == len(consumer)
assert 1 == queue.size()
def test_consumer_get_critical_failure(queue):
queue.put('test')
with queue.consumer() as consumer:
with mock.patch('redis.StrictRedis.hset') as m:
m.side_effect = Exception('failed')
with pytest.raises(Exception) as exc:
consumer.get()
assert 'failed' in exc.value.message
assert 0 == len(consumer)
assert 1 == queue.size()
def test_message_change_priority_within_consumer(queue):
payload = 'test'
queue.put(payload=payload, priority=3)
with queue.consumer() as consumer:
msg = consumer.get()
assert payload == msg.payload
assert 3 == msg.priority
consumer.reject(msg)
queue.put(msg.payload, priority=-3)
with queue.consumer() as consumer:
msg = consumer.get()
assert payload == msg.payload
assert -3 == msg.priority
def test_message_fifo(queue, caplog):
caplog.setLevel(logging.DEBUG, logger='pyrediq')
for priority in xrange(-7, 8):
for i in xrange(10):
payload = {'inserted': i, 'priority': priority}
queue.put(payload=payload, priority=priority)
with queue.consumer() as consumer:
for priority in xrange(-7, 8):
for i in xrange(10):
msg = consumer.get()
log.debug('%r', msg.payload)
assert i == msg.payload['inserted']
assert priority == msg.payload['priority']
consumer.ack(msg)
def test_basic_workflow():
with PriorityQueue(generate_queue_name(), redis.StrictRedis()) as queue:
id1 = queue.put(payload={'ack': True}, priority=+2)
id2 = queue.put(payload={'reject': True}, priority=-2)
with queue.consumer() as consumer:
assert consumer.id in list(queue._get_consumer_ids())
msg = consumer.get()
assert 'reject' in msg.payload
assert msg.priority == -2
assert msg.id == id2
consumer.reject(msg, requeue=True)
msg = consumer.get()
assert 'reject' in msg.payload
assert msg.priority == -2
assert msg.id == id2
consumer.reject(msg)
msg = consumer.get()
assert 'ack' in msg.payload
assert msg.priority == +2
assert msg.id == id1
consumer.ack(msg)
assert consumer.id not in list(queue._get_consumer_ids())
queue.purge()
def test_orphan_consumer():
assert orphan_consumer_cleaner._is_running is True
orphan_consumer_cleaner.stop()
assert orphan_consumer_cleaner._is_running is False
orphan_consumer_cleaner._schedule()
assert orphan_consumer_cleaner._is_running is True
def test_orphan_consumer_cleanup():
queue_name = generate_queue_name()
redis_conn = redis.StrictRedis()
with PriorityQueue(queue_name, redis_conn) as queue:
queue.put(payload='test', priority=+0)
consumer = MessageConsumer(queue)
consumer.get(block=False)
# Simulate orphan
consumer._stop_beat()
with PriorityQueue(queue_name, redis_conn) as queue:
orphan_consumer_cleaner._do_cleaning()
with queue.consumer() as consumer:
msg = consumer.get(block=False)
assert msg.payload == 'test'
consumer.ack(msg)
queue.purge()
@mock.patch.object(orphan_consumer_cleaner, 'stop')
def test_atexit(stop):
_cleanup()
stop.assert_called()
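# A minimal end-to-end sketch against a local Redis server (the payload is
# illustrative; lower priority values are consumed first):
#
#   conn = redis.StrictRedis()
#   with PriorityQueue(generate_queue_name(), conn) as q:
#       q.put(payload={'job': 1}, priority=-1)
#       with q.consumer() as c:
#           msg = c.get(block=True, timeout=5)
#           c.ack(msg)
#       q.purge()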
|
|
#!C:\OSGEO4~1\bin\python.exe
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: epsg_tr.py 8e263710cb425c4a8b76b1f363b98be41ea0a983 2018-04-30 19:40:20 +1000 Ben Elliston $
#
# Project: CFS OGC MapServer
# Purpose: Script to create WKT and PROJ.4 dictionaries for EPSG GCS/PCS
# codes.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
# ******************************************************************************
# Copyright (c) 2001, Frank Warmerdam
# Copyright (c) 2009-2010, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import osr
from osgeo import gdal
# =============================================================================
def Usage():
print('Usage: epsg_tr.py [-wkt] [-pretty_wkt] [-proj4] [-xml] [-postgis]')
print(' [-skip] [-list filename] [start_code [end_code]]')
sys.exit(1)
# =============================================================================
def trHandleCode(code, gen_dict_line, report_error, output_format):
try:
err = prj_srs.ImportFromEPSG(code)
except:
err = 1
if err != 0 and report_error:
        print('Unable to look up %d: either it is not a valid EPSG' % code)
        print('code, or the EPSG CSV files are not accessible.')
sys.exit(2)
else:
if output_format == '-pretty_wkt':
if gen_dict_line:
print('EPSG:%d' % code)
print(prj_srs.ExportToPrettyWkt())
if output_format == '-xml':
print(prj_srs.ExportToXML())
if output_format == '-wkt':
if gen_dict_line:
print('EPSG:%d' % code)
print(prj_srs.ExportToWkt())
if output_format == '-proj4':
out_string = prj_srs.ExportToProj4()
name = prj_srs.GetAttrValue('COMPD_CS')
if name is None:
name = prj_srs.GetAttrValue('PROJCS')
if name is None:
name = prj_srs.GetAttrValue('GEOGCS')
if name is None:
name = prj_srs.GetAttrValue('GEOCCS')
if name is None:
name = 'Unknown'
print('# %s' % name)
if err == 0 and out_string.find('+proj=') > -1:
print('<%s> %s <>' % (str(code), out_string))
else:
print('# Unable to translate coordinate system '
'EPSG:%d into PROJ.4 format.' % code)
print('#')
if output_format == '-postgis':
name = prj_srs.GetAttrValue('COMPD_CS')
if name is None:
name = prj_srs.GetAttrValue('PROJCS')
if name is None:
name = prj_srs.GetAttrValue('GEOGCS')
if name is None:
name = prj_srs.GetAttrValue('GEOCCS')
try:
proj4text = prj_srs.ExportToProj4()
except:
err = 1
wkt = prj_srs.ExportToWkt()
print('---')
print('--- EPSG %d : %s' % (code, name))
print('---')
if err:
print('-- (unable to translate)')
else:
wkt = gdal.EscapeString(wkt, scheme=gdal.CPLES_SQL)
proj4text = gdal.EscapeString(proj4text, scheme=gdal.CPLES_SQL)
print('INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (%s,\'EPSG\',%s,\'%s\',\'%s\');' %
(str(code), str(code), wkt, proj4text))
# INGRES COPY command input.
if output_format == '-copy':
try:
wkt = prj_srs.ExportToWkt()
proj4text = prj_srs.ExportToProj4()
print('%d\t%d%s\t%d\t%d%s\t%d%s\n'
% (code, 4, 'EPSG', code, len(wkt), wkt,
len(proj4text), proj4text))
except:
pass
# =============================================================================
if __name__ == '__main__':
start_code = -1
end_code = -1
list_file = None
output_format = '-pretty_wkt'
report_error = 1
argv = gdal.GeneralCmdLineProcessor(sys.argv)
if argv is None:
sys.exit(0)
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-wkt' or arg == '-pretty_wkt' or arg == '-proj4' \
or arg == '-postgis' or arg == '-xml' or arg == '-copy':
output_format = arg
elif arg[:5] == '-skip':
report_error = 0
elif arg == '-list' and i < len(argv) - 1:
i = i + 1
list_file = argv[i]
elif arg[0] == '-':
Usage()
elif int(arg) > 0:
if start_code == -1:
start_code = int(arg)
end_code = int(arg)
elif end_code == start_code:
end_code = int(arg)
else:
Usage()
else:
Usage()
i = i + 1
# Output BEGIN transaction for PostGIS
if output_format == '-postgis':
print('BEGIN;')
# Do we need to produce a single output definition, or include a
# dictionary line for each entry?
gen_dict_line = start_code != end_code
# loop over all codes to generate output
prj_srs = osr.SpatialReference()
if start_code != -1:
for code in range(start_code, end_code + 1):
trHandleCode(code, gen_dict_line, report_error, output_format)
# loop over codes read from list file.
elif list_file is not None:
list_fd = open(list_file)
line = list_fd.readline()
while line:
try:
c_offset = line.find(',')
if c_offset > 0:
line = line[:c_offset]
code = int(line)
except:
code = -1
if code != -1:
trHandleCode(code, gen_dict_line, report_error, output_format)
line = list_fd.readline()
else:
Usage()
# Output COMMIT transaction for PostGIS
if output_format == '-postgis':
print('COMMIT;')
print('VACUUM ANALYZE spatial_ref_sys;')
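# Example invocations (codes and file names are illustrative):
#
#   epsg_tr.py -proj4 4326                 # one GCS definition as PROJ.4
#   epsg_tr.py -wkt 32601 32660            # a range of UTM zone definitions
#   epsg_tr.py -postgis -skip -list codes.txt > spatial_ref_sys.sql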
|
|
"""
Implementation of SVG(0) in using pytorch
"""
import argparse
import copy

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
class SVG(RLAlgorithm):
"""
SVG(0)
"""
def __init__(
self,
env,
policy,
qf,
batch_size=32,
            n_epoch=200,
            epoch_length=1000,
            min_pool_size=10000,
            n_updates_per_sample=1,
            soft_target_tau=0.001,
            replay_pool_size=1000000,
discount=0.99,
max_path_length=250,
qf_weight_decay=0.,
qf_update_method=optim.Adam,
qf_learning_rate=1e-3,
policy_weight_decay=0,
policy_update_method=optim.Adam,
policy_learning_rate=1e-4,
eval_samples=10000,
scale_reward=1.0,
include_horizon_terminal_transitions=False,
plot=False,
pause_for_plot=False,
):
        self.env = env
        self.observation_dim = np.prod(env.observation_space.shape)
        self.action_dim = np.prod(env.action_space.shape)
        self.policy = policy
        self.qf = qf
        self.batch_size = batch_size
        self.n_epoch = n_epoch
        self.epoch_length = epoch_length
        self.min_pool_size = min_pool_size
        self.n_updates_per_sample = n_updates_per_sample
        self.soft_target_tau = soft_target_tau
        self.replay_pool_size = replay_pool_size
self.discount = discount
self.max_path_length = max_path_length
self.qf_weight_decay = qf_weight_decay
self.eval_samples = eval_samples
self.include_horizon_terminal_transitions = include_horizon_terminal_transitions
self.plot = plot
self.pause_for_plot = pause_for_plot
        # Target networks (slowly-updated copies used to compute TD targets)
        self.target_qf = copy.deepcopy(self.qf)
        self.target_policy = copy.deepcopy(self.policy)
# Define optimizer
self.qf_optimizer = qf_update_method(self.qf.parameters(),
lr=qf_learning_rate, weight_decay=self.qf_weight_decay)
self.policy_optimizer = policy_update_method(self.policy.parameters(),
lr=policy_learning_rate)
self.qf_loss_averages = []
self.policy_surr_averages = []
self.q_averages = []
self.y_averages = []
self.paths = []
self.es_path_returns = []
self.paths_samples_cnt = 0
self.scale_reward = scale_reward
def start_worker(self):
parallel_sampler.populate_task(self.env, self.policy)
def train(self):
pool = SARSAReplayPool(
max_pool_size=self.replay_pool_size,
observation_dim=self.observation_dim,
action_dim=self.action_dim
)
self.start_worker()
itr = 0
path_length = 0
path_return = 0
terminal = False
observation = self.env.reset()
for epoch in range(self.n_epoch):
logger.push_prefix('epoch #%d | ' % epoch)
logger.log("Training started")
for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
# Execute policy
if terminal: # or path_length > self.max_path_length:
# Note that if the last time step ends an episode, the very
# last state and observation will be ignored and not added
# to the replay pool
observation = self.env.reset()
path_length = 0
path_return = 0
                noise = self.sample_noise()
                action = self.policy.get_action_with_noise(noise)
                # clip the (already noisy) action to the valid range
                action = np.clip(action, self.env.action_space.low,
                                 self.env.action_space.high)
next_observation, reward, terminal, _ = self.env.step(action)
path_length += 1
path_return += reward
if not terminal and path_length >= self.max_path_length:
terminal = True
# only include the terminal transition in this case if the
# flag was set.
if self.include_horizon_terminal_transitions:
pool.add_sample(observation, action, reward * self.scale_reward, terminal)
else:
pool.add_sample(observation, action, reward * self.scale_reward, terminal)
observation = next_observation
if pool.size >= self.min_pool_size:
for update_itr in range(self.n_updates_per_sample):
# Train policy
batch = pool.random_batch(self.batch_size)
self.do_training(itr, batch)
sample_policy.set_param_values(self.policy.get_param_values())
itr += 1
logger.log("Training finished")
if pool.size >= self.min_pool_size:
self.evaluate(epoch, pool)
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
logger.dump_tabular(with_prefix=False)
logger.pop_prefix()
if self.plot:
rollout(self.env, self.policy, animated=True,
max_path_length=self.max_path_length,
speedup=2)
if self.pause_for_plot:
input("Plotting evaluation run: Press Enter to "
"continue...")
self.env.terminate()
def do_training(self, itr, batch):
# Update Q Function
obs, actions, rewards, next_obs, terminals = ext.extract(
batch,
"observations", "actions", "rewards", "next_observations",
"terminals"
)
next_actions, _ = self.target_policy.get_action(next_obs)
next_qvals = self.target_qf.get_qval(next_obs, next_actions)
rewards = rewards.reshape(-1, 1)
terminals_mask = (1.0 - terminals).reshape(-1, 1)
ys = rewards + terminals_mask * self.discount * next_qvals
qf_loss = self.train_qf(ys, obs, actions)
policy_surr = self.train_policy(obs)
self.target_policy.set_param_values(
running_average_tensor_list(
self.target_policy.get_param_values(),
self.policy.get_param_values(),
self.soft_target_tau))
self.target_qf.set_param_values(
running_average_tensor_list(
self.target_qf.get_param_values(),
self.qf.get_param_values(),
self.soft_target_tau))
self.qf_loss_averages.append(qf_loss)
self.policy_surr_averages.append(policy_surr)
    def train_qf(self, expected_qval, obs_val, actions_val):
        """
        Fit the Q-function to the TD targets by minimizing MSE loss.
        """
obs = Variable(torch.from_numpy(obs_val)).type(
torch.FloatTensor)
actions = Variable(torch.from_numpy(actions_val)).type(
torch.FloatTensor)
expected_q = Variable(torch.from_numpy(expected_qval)).type(
torch.FloatTensor)
q_vals = self.qf(obs, actions)
# Define loss function
loss_fn = nn.MSELoss()
loss = loss_fn(q_vals, expected_q)
# Backpropagation and gradient descent
self.qf_optimizer.zero_grad()
loss.backward()
self.qf_optimizer.step()
return loss.data.numpy()
def train_policy(self, obs_val):
"""
Given the mini-batch, do gradient ascent on policy
"""
obs = Variable(torch.from_numpy(obs_val)).type(torch.FloatTensor)
# Do gradient descent, so need to add minus in front
average_q = -self.qf(obs, self.policy(obs)).mean()
self.policy_optimizer.zero_grad()
average_q.backward()
self.policy_optimizer.step()
return average_q.data.numpy()
def evaluate(self, epoch, pool):
logger.log("Collecting samples for evaluation")
paths = parallel_sampler.sample_paths(
policy_params=self.policy.get_param_values(),
max_samples=self.eval_samples,
max_path_length=self.max_path_length,
)
average_discounted_return = np.mean(
[special.discount_return(path["rewards"], self.discount) for path in paths]
)
returns = [sum(path["rewards"]) for path in paths]
# all_qs = np.concatenate(self.q_averages)
# all_ys = np.concatenate(self.y_averages)
average_q_loss = np.mean(self.qf_loss_averages)
average_policy_surr = np.mean(self.policy_surr_averages)
average_action = np.mean(np.square(np.concatenate(
[path["actions"] for path in paths]
)))
# policy_reg_param_norm = np.linalg.norm(
# self.policy.get_param_values(regularizable=True)
# )
# qfun_reg_param_norm = np.linalg.norm(
# self.qf.get_param_values(regularizable=True)
# )
logger.record_tabular('Epoch', epoch)
logger.record_tabular('AverageReturn',
np.mean(returns))
logger.record_tabular('StdReturn',
np.std(returns))
logger.record_tabular('MaxReturn',
np.max(returns))
logger.record_tabular('MinReturn',
np.min(returns))
if len(self.es_path_returns) > 0:
logger.record_tabular('AverageEsReturn',
np.mean(self.es_path_returns))
logger.record_tabular('StdEsReturn',
np.std(self.es_path_returns))
logger.record_tabular('MaxEsReturn',
np.max(self.es_path_returns))
logger.record_tabular('MinEsReturn',
np.min(self.es_path_returns))
logger.record_tabular('AverageDiscountedReturn',
average_discounted_return)
logger.record_tabular('AverageQLoss', average_q_loss)
logger.record_tabular('AveragePolicySurr', average_policy_surr)
# logger.record_tabular('AverageQ', np.mean(all_qs))
# logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
# logger.record_tabular('AverageY', np.mean(all_ys))
# logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
# logger.record_tabular('AverageAbsQYDiff',
# np.mean(np.abs(all_qs - all_ys)))
logger.record_tabular('AverageAction', average_action)
# logger.record_tabular('PolicyRegParamNorm',
# policy_reg_param_norm)
# logger.record_tabular('QFunRegParamNorm',
# qfun_reg_param_norm)
# TODO (ewei), may need to add log_diagnostics method
# in policy class
# self.env.log_diagnostics(paths)
# self.policy.log_diagnostics(paths)
self.qf_loss_averages = []
self.policy_surr_averages = []
self.q_averages = []
self.y_averages = []
self.es_path_returns = []
def update_plot(self):
if self.plot:
plotter.update_plot(self.policy, self.max_path_length)
def get_epoch_snapshot(self, epoch):
return dict(
env=self.env,
epoch=epoch,
qf=self.qf,
policy=self.policy,
target_qf=self.target_qf,
target_policy=self.target_policy,
es=self.es)
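# A minimal usage sketch (assumed, not part of the original source).
# FeedForwardPolicy and FeedForwardQFunction are hypothetical torch.nn.Module
# subclasses exposing the get_action_with_noise / get_param_values interface
# this class expects:
#
#   env = gym.make('Pendulum-v0')
#   policy = FeedForwardPolicy(env.observation_space.shape, env.action_space.shape)
#   qf = FeedForwardQFunction(env.observation_space.shape, env.action_space.shape)
#   algo = SVG(env=env, policy=policy, qf=qf, n_epochs=200, batch_size=32)
#   algo.train()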
|
|
# Copyright 2015 Hitachi Data Systems inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
import ddt
from manila.common import constants
from manila import context
from manila.data import helper as data_copy_helper
from manila import db
from manila import exception
from manila.share import rpcapi as share_rpc
from manila import test
from manila.tests import db_utils
from manila import utils
@ddt.ddt
class DataServiceHelperTestCase(test.TestCase):
"""Tests DataServiceHelper."""
def setUp(self):
super(DataServiceHelperTestCase, self).setUp()
self.share = db_utils.create_share()
self.share_instance = db_utils.create_share_instance(
share_id=self.share['id'],
status=constants.STATUS_AVAILABLE)
self.context = context.get_admin_context()
self.share_instance = db.share_instance_get(
self.context, self.share_instance['id'], with_share_data=True)
self.access = db_utils.create_access(share_id=self.share['id'])
self.helper = data_copy_helper.DataServiceHelper(
self.context, db, self.share)
@ddt.data(True, False)
def test_allow_access_to_data_service(self, allow_dest_instance):
access = db_utils.create_access(share_id=self.share['id'])
info_src = {
'access_mapping': {
'ip': ['nfs'],
'user': ['cifs', 'nfs'],
}
}
info_dest = {
'access_mapping': {
'ip': ['nfs', 'cifs'],
'user': ['cifs'],
}
}
if allow_dest_instance:
mapping = {'ip': ['nfs'], 'user': ['cifs']}
else:
mapping = info_src['access_mapping']
fake_access = {
'access_to': 'fake_ip',
'access_level': constants.ACCESS_LEVEL_RW,
'access_type': 'ip',
}
access_values = fake_access
access_values['share_id'] = self.share['id']
self.mock_object(
self.helper, '_get_access_entries_according_to_mapping',
mock.Mock(return_value=[fake_access]))
self.mock_object(
self.helper.db, 'share_access_get_all_by_type_and_access',
mock.Mock(return_value=[access]))
change_data_access_call = self.mock_object(
self.helper, '_change_data_access_to_instance')
self.mock_object(self.helper.db, 'share_instance_access_create',
mock.Mock(return_value=access))
if allow_dest_instance:
result = self.helper.allow_access_to_data_service(
self.share_instance, info_src, self.share_instance, info_dest)
else:
result = self.helper.allow_access_to_data_service(
self.share_instance, info_src)
self.assertEqual([access], result)
(self.helper._get_access_entries_according_to_mapping.
assert_called_once_with(mapping))
(self.helper.db.share_access_get_all_by_type_and_access.
assert_called_once_with(
self.context, self.share['id'], fake_access['access_type'],
fake_access['access_to']))
access_create_calls = [
mock.call(self.context, access_values, self.share_instance['id'])
]
if allow_dest_instance:
access_create_calls.append(mock.call(
self.context, access_values, self.share_instance['id']))
self.helper.db.share_instance_access_create.assert_has_calls(
access_create_calls)
change_access_calls = [
mock.call(self.share_instance, [access], deny=True),
mock.call(self.share_instance),
]
if allow_dest_instance:
change_access_calls.append(
mock.call(self.share_instance))
self.assertEqual(len(change_access_calls),
change_data_access_call.call_count)
change_data_access_call.assert_has_calls(change_access_calls)
@ddt.data({'ip': []}, {'cert': []}, {'user': []}, {'cephx': []}, {'x': []})
def test__get_access_entries_according_to_mapping(self, mapping):
data_copy_helper.CONF.data_node_access_cert = 'fake'
data_copy_helper.CONF.data_node_access_ips = 'fake'
data_copy_helper.CONF.data_node_access_admin_user = 'fake'
expected = [{
'access_type': list(mapping.keys())[0],
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': 'fake',
}]
exists = [x for x in mapping if x in ('ip', 'user', 'cert')]
if exists:
result = self.helper._get_access_entries_according_to_mapping(
mapping)
self.assertEqual(expected, result)
else:
self.assertRaises(
exception.ShareDataCopyFailed,
self.helper._get_access_entries_according_to_mapping, mapping)
def test__get_access_entries_according_to_mapping_exception_not_set(self):
data_copy_helper.CONF.data_node_access_ips = None
self.assertRaises(
exception.ShareDataCopyFailed,
self.helper._get_access_entries_according_to_mapping, {'ip': []})
def test__get_access_entries_according_to_mapping_ip_list(self):
ips = ['fake1', 'fake2']
data_copy_helper.CONF.data_node_access_ips = ips
expected = [{
'access_type': 'ip',
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': x,
} for x in ips]
result = self.helper._get_access_entries_according_to_mapping(
{'ip': []})
self.assertEqual(expected, result)
def test_deny_access_to_data_service(self):
# mocks
self.mock_object(self.helper, '_change_data_access_to_instance')
# run
self.helper.deny_access_to_data_service(
[self.access], self.share_instance['id'])
# asserts
self.helper._change_data_access_to_instance.assert_called_once_with(
self.share_instance['id'], [self.access], deny=True)
@ddt.data(None, Exception('fake'))
def test_cleanup_data_access(self, exc):
# mocks
self.mock_object(self.helper, 'deny_access_to_data_service',
mock.Mock(side_effect=exc))
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_data_access([self.access],
self.share_instance['id'])
# asserts
self.helper.deny_access_to_data_service.assert_called_once_with(
[self.access], self.share_instance['id'])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(False, True)
def test_cleanup_temp_folder(self, exc):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(os.path, 'exists',
mock.Mock(side_effect=[True, True, exc]))
self.mock_object(os, 'rmdir')
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_temp_folder(
self.share_instance['id'], '/fake_path/')
# asserts
os.rmdir.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(None, Exception('fake'))
def test_cleanup_unmount_temp_folder(self, exc):
# mocks
self.mock_object(self.helper, 'unmount_share_instance',
mock.Mock(side_effect=exc))
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.cleanup_unmount_temp_folder(
'unmount_template', 'fake_path', self.share_instance['id'])
# asserts
self.helper.unmount_share_instance.assert_called_once_with(
'unmount_template', 'fake_path', self.share_instance['id'])
if exc:
self.assertTrue(data_copy_helper.LOG.warning.called)
@ddt.data(True, False)
def test__change_data_access_to_instance(self, deny):
access_rule = db_utils.create_access(share_id=self.share['id'])
access_rule = db.share_instance_access_get(
self.context, access_rule['id'], self.share_instance['id'])
# mocks
self.mock_object(share_rpc.ShareAPI, 'update_access')
self.mock_object(utils, 'wait_for_access_update')
mock_access_rules_status_update = self.mock_object(
self.helper.access_helper,
'get_and_update_share_instance_access_rules_status')
mock_rules_update = self.mock_object(
self.helper.access_helper,
'get_and_update_share_instance_access_rules')
# run
self.helper._change_data_access_to_instance(
self.share_instance, access_rule, deny=deny)
# asserts
if deny:
mock_rules_update.assert_called_once_with(
self.context, share_instance_id=self.share_instance['id'],
filters={'access_id': [access_rule['id']]},
updates={'state': constants.ACCESS_STATE_QUEUED_TO_DENY})
else:
self.assertFalse(mock_rules_update.called)
share_rpc.ShareAPI.update_access.assert_called_once_with(
self.context, self.share_instance)
mock_access_rules_status_update.assert_called_once_with(
self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING,
share_instance_id=self.share_instance['id'])
utils.wait_for_access_update.assert_called_once_with(
self.context, self.helper.db, self.share_instance,
data_copy_helper.CONF.data_access_wait_access_rules_timeout)
def test_mount_share_instance(self):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(utils, 'execute')
self.mock_object(os.path, 'exists', mock.Mock(
side_effect=[False, False, True]))
self.mock_object(os, 'makedirs')
# run
self.helper.mount_share_instance(
'mount %(path)s', '/fake_path', self.share_instance)
# asserts
utils.execute.assert_called_once_with('mount', fake_path,
run_as_root=True)
os.makedirs.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
@ddt.data([True, True, False], [True, True, Exception('fake')])
def test_unmount_share_instance(self, side_effect):
fake_path = ''.join(('/fake_path/', self.share_instance['id']))
# mocks
self.mock_object(utils, 'execute')
self.mock_object(os.path, 'exists', mock.Mock(
side_effect=side_effect))
self.mock_object(os, 'rmdir')
self.mock_object(data_copy_helper.LOG, 'warning')
# run
self.helper.unmount_share_instance(
'unmount %(path)s', '/fake_path', self.share_instance['id'])
# asserts
utils.execute.assert_called_once_with('unmount', fake_path,
run_as_root=True)
os.rmdir.assert_called_once_with(fake_path)
os.path.exists.assert_has_calls([
mock.call(fake_path),
mock.call(fake_path),
mock.call(fake_path)
])
if any(isinstance(x, Exception) for x in side_effect):
self.assertTrue(data_copy_helper.LOG.warning.called)
|
|
import json
import time
import gzip
import urllib2
import psycopg2
import datetime
import math
from StringIO import StringIO
from marketPrices import ItemList
def isfloat(values):
try:
for value in values:
float(value)
return True
except ValueError:
return False
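# Example (illustrative): isfloat(['1.5']) returns True, while isfloat(['5x'])
# returns False because float() raises ValueError on the non-numeric token.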
def isfloatSingle(value):
try:
float(value)
return True
except ValueError:
return False
class Indexer:
def __init__(self):
# Read config file
with open('config.json') as config_file:
configs = json.load(config_file)
self.apiUrl = configs['apiUrl']
self.changeId = None
self.ninjaApiUrl = configs['ninjaApiUrl']
self.ninjaRatesUrl = configs['ninjaRatesUrl']
self.league = configs['league']
self.threshold = configs['threshold']
self.delay = configs['delay']
# Read predefined market rates for currency (Sell values)
with open('currency_rates.json') as rates_file:
self.currency_rates = json.load(rates_file)
# Read predefined currency names
with open('currency_names.json') as names_file:
self.currency_names = json.load(names_file)
# Read predefined leaguestone rates
with open('leaguestones.json') as stones_file:
self.leaguestones = json.load(stones_file)
# Read database info
with open('dbinfo.json') as db_file:
self.dbinfo = json.load(db_file)
self.deals = []
self.itemDeals = []
# Request for the latest rates
self.itemList = ItemList()
self.updateWithNinjaRates()
# Update rates that have shared keys
self.updateSharedCurrencyRates()
# Request for the latest changeId from poe.ninja
request = urllib2.Request(self.ninjaApiUrl)
resp = urllib2.urlopen(request)
status = resp.getcode()
if status == 200:
ninjaJson = json.load(resp)
self.changeId = ninjaJson['nextChangeId']
print "ChangeId received from poe.ninja: " + self.changeId
# Start indexing
self.index()
def updateSharedCurrencyRates(self):
sharedPerandus = ['coins', 'shekel', 'perandus']
for shared in sharedPerandus:
self.currency_rates[shared] = self.currency_rates['coin']
def updateWithNinjaRates(self):
self.itemPrices = self.itemList.getAllItems()
self.itemPrices.update(self.leaguestones)
self.currentDate = time.strftime('%Y-%m-%d', time.gmtime())
ratesUrlModified = self.ninjaRatesUrl + self.currentDate
request = urllib2.Request(ratesUrlModified)
resp = urllib2.urlopen(request)
status = resp.getcode()
if status == 200:
ratesJson = json.load(resp)
for line in ratesJson['lines']:
currencyTypeName = line['currencyTypeName']
if (currencyTypeName in self.currency_names) and line['receive']:
self.currency_rates[self.currency_names[currencyTypeName]] = line['receive']['value']
print "Updated currency rates with poe.ninja at: " + str(datetime.datetime.now())
def createBlankStock(self):
stock = {}
for key in self.currency_names:
stock[self.currency_names[key]] = 0
return stock
def dealExists(self, cursor, deal):
cursor.execute("SELECT * from currencyDeals where charName = %s and currencyName = %s and stock = %s and note = %s",
(deal['charName'], deal['currencyName'], deal['stock'], deal['note']))
return cursor.fetchone() is not None
def itemDealExists(self, cursor, deal):
cursor.execute("SELECT * from itemDeals where charName = %s and itemName = %s and stashName = %s and x = %s and y = %s",
(deal['charName'], deal['itemName'], deal['stashName'], deal['x'], deal['y']))
return cursor.fetchone() is not None
def storeDeals(self):
if len(self.deals) == 0 and len(self.itemDeals) == 0:
return
        dbconn = psycopg2.connect("dbname=" + self.dbinfo['dbname'] +
                                  " user=" + self.dbinfo['username'] +
                                  " password=" + self.dbinfo['password'] +
                                  " host=" + self.dbinfo['host'] +
                                  " port=" + self.dbinfo['port'])
cursor = dbconn.cursor()
for deal in self.deals:
if not self.dealExists(cursor, deal):
cursor.execute("""INSERT INTO currencyDeals (league, charName, currencyName, offeringAmount, askingCurrency, askingAmount, offeringEquiv, askingEquiv, profit, stock, note)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""",
(deal['league'],
deal['charName'],
deal['currencyName'],
deal['offeringAmount'],
deal['askingCurrency'],
deal['askingAmount'],
deal['offeringEquiv'],
deal['askingEquiv'],
deal['profit'],
deal['stock'],
deal['note']))
for deal in self.itemDeals:
if not self.itemDealExists(cursor, deal):
cursor.execute("""INSERT INTO itemDeals (league, charName, itemName, mods, askingPrice, avgPrice, profit, stock, note, stashName, x, y)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""",
(deal['league'],
deal['charName'],
deal['itemName'],
deal['mods'],
deal['askingPrice'],
deal['avgPrice'],
deal['profit'],
deal['stock'],
deal['note'],
deal['stashName'],
deal['x'],
deal['y']))
dbconn.commit()
cursor.close()
dbconn.close()
self.deals = []
self.itemDeals = []
def removeOldDeals(self):
        dbconn = psycopg2.connect("dbname=" + self.dbinfo['dbname'] +
                                  " user=" + self.dbinfo['username'] +
                                  " password=" + self.dbinfo['password'] +
                                  " host=" + self.dbinfo['host'] +
                                  " port=" + self.dbinfo['port'])
cursor = dbconn.cursor()
# Remove all rows in the table that were created more than 30 minutes ago
cursor.execute("DELETE FROM currencyDeals c WHERE created < CURRENT_TIMESTAMP - interval '30 minutes';")
cursor.execute("DELETE FROM itemDeals c WHERE created < CURRENT_TIMESTAMP - interval '30 minutes';")
dbconn.commit()
cursor.close()
dbconn.close()
# Returns the listed price of an item, returns 0.0 if no prices are listed
def getItemPrice(self, item, stash):
askingPrice = 0.0
# First check stash global buyout price
stashTokens = stash['stash'].split(' ')
if len(stashTokens) == 3 and (stashTokens[0] == '~b/o' or stashTokens[0] == '~price') and stashTokens[2] == 'chaos':
amount = stashTokens[1]
if not isfloatSingle(amount):
return askingPrice
askingPrice = float(amount)
# Second check item buyout price. If it exists it will overwrite stash buyout
if 'note' in item:
notes = item['note'].split(' ')
if len(notes) == 3 and (notes[0] == '~b/o' or notes[0] == '~price') and notes[2] == 'chaos':
amount = notes[1]
if not isfloatSingle(amount):
return askingPrice
askingPrice = float(amount)
return askingPrice
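    # Worked example: a stash named '~b/o 5 chaos' sets a global asking price
    # of 5.0, and an item note of '~price 3 chaos' then overrides it to 3.0;
    # anything not priced in chaos (or with a non-numeric amount) yields 0.0.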
def isCorruptionExcluded(self, itemName, corrupted):
exclusions = ["Voll's Vision", "Drillneck"]
return itemName in exclusions and corrupted
def getProfitMargin(self, marketPrice):
return 2.0 if marketPrice * 0.1 <= 2.0 else marketPrice * 0.1
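    # Worked example: the margin is 10% of the market price with a floor of 2
    # chaos, so getProfitMargin(15.0) == 2.0 while getProfitMargin(40.0) == 4.0.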
    def processItem(self, typeLine, stash, item, extendedName):
typeLineTokens = typeLine.split(' ')
if 'Leaguestone' in typeLineTokens:
typeIndex = typeLineTokens.index('Leaguestone')
typeLine = typeLineTokens[typeIndex - 1] + ' ' + 'Leaguestone'
if typeLine in self.itemPrices:
askingPrice = self.getItemPrice(item, stash)
if askingPrice == 0.0:
return
# Modifying properties so that placeholder %x strings are replaced with actual values
modifiedProperties = []
if 'properties' in item:
for prop in item['properties']:
# If the leaguestones have these mods, skip them
if prop['name'].startswith('Can only be used in Areas with Monster Level') and prop['name'].endswith('or below'):
return
if prop['name'].startswith('Can only be used in Areas with Monster Level between'):
return
propString = ''
if 'values' in prop and len(prop['values']) > 0:
# If there is only one property and the value cannot be placed in a placeholder, simply append value to the end
if not ('%' in prop['name']) and len(prop['values']) == 1:
propString = prop['name'] + ': ' + prop['values'][0][0]
else:
nameTokens = prop['name'].split(' ')
modTokens = []
index = 0
for token in nameTokens:
if token.startswith('%'):
modTokens.append(prop['values'][index][0])
index += 1
else:
modTokens.append(token)
propString = ' '.join(modTokens)
else:
propString = prop['name']
modifiedProperties.append(propString)
# Mod object will contain all modifiers information on the item
mods = {}
mods['implicitMods'] = item['implicitMods'] if 'implicitMods' in item else []
mods['properties'] = modifiedProperties
mods['explicitMods'] = item['explicitMods'] if 'explicitMods' in item else []
mods['prophecyText'] = item['prophecyText'] if 'prophecyText' in item else ''
mods['prophecyDiffText'] = item['prophecyDiffText'] if 'prophecyDiffText' in item else ''
            mods['corrupted'] = item.get('corrupted', False)
askingPrice = float(askingPrice)
avgPrice = self.itemPrices[typeLine]
# For leaguestones we want to buy at average price, for other items we want a better margin
if (askingPrice <= avgPrice and 'Leaguestone' in typeLine) or (askingPrice < avgPrice and avgPrice - askingPrice >= self.getProfitMargin(avgPrice)):
new_deal = {}
new_deal['league'] = self.league
new_deal['charName'] = stash['lastCharacterName']
new_deal['itemName'] = ' '.join(typeLineTokens) + extendedName
new_deal['mods'] = json.dumps(mods)
new_deal['askingPrice'] = askingPrice
new_deal['avgPrice'] = avgPrice
new_deal['profit'] = avgPrice - askingPrice
new_deal['stock'] = item['stackSize'] if 'stackSize' in item else 1
new_deal['note'] = item['note'] if 'note' in item else ''
new_deal['stashName'] = stash['stash']
new_deal['x'] = item['x'] + 1
new_deal['y'] = item['y'] + 1
self.itemDeals.append(new_deal)
def processStashes(self, stashes):
for stash in stashes:
if stash['public']:
items = stash['items']
# For each separate stash we need a stock object to keep count of every currency type
stock = self.createBlankStock()
                deals = []
for item in items:
# Only match items belonging to the specified league
if item['league'] == self.league:
# typeLine is the displaying name of the item, in this case the currency's official name in-game
typeLine = item['typeLine']
itemName = item['name']
# Remove the added string in typeLine if it exists
typeLinePrefix = '<<set:MS>><<set:M>><<set:S>>'
if typeLine.startswith(typeLinePrefix):
typeLine = typeLine[len(typeLinePrefix):]
                        self.processItem(typeLine, stash, item, '')
                        if itemName.startswith(typeLinePrefix):
                            itemName = itemName[len(typeLinePrefix):]
                        if self.isCorruptionExcluded(itemName, item.get('corrupted', False)):
                            continue
                        self.processItem(itemName, stash, item, ' ' + typeLine)
if typeLine in self.currency_names:
currencyName = self.currency_names[typeLine]
# Some items do not have a stack size, default to 1
stackSize = 1
                            if 'stackSize' in item:
stackSize = item['stackSize']
stock[currencyName] += stackSize
if 'note' in item:
notes = item['note'].split(' ')
# Only care about buyouts or fixed priced items
if len(notes) == 3 and (notes[0] == '~b/o' or notes[0] == '~price'):
values = notes[1].split('/')
if not isfloat(values):
continue
# First value is the amount of currency buyer needs to give, second value is the amount of currency seller needs to sell off
# If there is no second value, then it means seller is only selling one of the currency type
if len(values) == 1:
# Ignore any false deals, aka asking for 0 currencies
if float(values[0]) == 0.0:
continue
values.append(1)
askingCurrency = notes[2]
if askingCurrency in self.currency_rates:
# Convert all currencies to chaos equivalent amounts
askingChaosEquiv = self.currency_rates[askingCurrency] * float(values[0])
offeringChaosEquiv = self.currency_rates[currencyName] * float(values[1])
if (offeringChaosEquiv > askingChaosEquiv) and (offeringChaosEquiv - askingChaosEquiv >= self.threshold):
new_deal = {}
new_deal['league'] = self.league
new_deal['charName'] = stash['lastCharacterName']
new_deal['currencyName'] = currencyName
new_deal['offeringAmount'] = values[1]
new_deal['askingCurrency'] = askingCurrency
new_deal['askingAmount'] = values[0]
new_deal['offeringEquiv'] = offeringChaosEquiv
new_deal['askingEquiv'] = askingChaosEquiv
new_deal['profit'] = offeringChaosEquiv - askingChaosEquiv
new_deal['stock'] = stock[currencyName]
new_deal['note'] = item['note']
deals.append(new_deal)
for deal in deals:
deal['stock'] = stock[deal['currencyName']]
self.deals = self.deals + deals
def index(self):
lastCleanoffTime = datetime.datetime.now()
print "Indexing beginning at: " + str(lastCleanoffTime)
while True:
currentTime = datetime.datetime.now()
timeDiff = currentTime - lastCleanoffTime
# Every 1800 seconds or 30 minutes we would like to purge old currency deals and update rates from poe.ninja
if timeDiff.total_seconds() > 1800:
self.removeOldDeals()
lastCleanoffTime = datetime.datetime.now()
print "Purged entries at : " + str(lastCleanoffTime)
self.updateWithNinjaRates()
            apiUrlModified = self.apiUrl if self.changeId is None else self.apiUrl + '?id=' + self.changeId
request = urllib2.Request(apiUrlModified)
request.add_header('Accept-encoding', 'gzip')
resp = urllib2.urlopen(request)
status = resp.getcode()
if status == 200 and resp.info().get('Content-Encoding') == 'gzip':
buf = StringIO(resp.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
stashJson = json.loads(data)
if 'next_change_id' in stashJson:
self.changeId = stashJson['next_change_id']
if 'stashes' in stashJson:
stashes = stashJson['stashes']
self.processStashes(stashes)
else:
print "Connection failed. Retrying"
# Store deals that were found in the current batch of stash tab data
self.storeDeals()
time.sleep(self.delay)
instance = Indexer()
|
|
import logging
from typing import Any, Dict, List, Optional
import torch
from torch.autograd import Variable
from torch.nn.functional import nll_loss
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway, MatrixAttention
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy, SquadEmAndF1
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("bidaf")
class BidirectionalAttentionFlow(Model):
"""
This class implements Minjoon Seo's `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
attention_similarity_function : ``SimilarityFunction``
The similarity function that we will use when comparing encoded passage and question
representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
mask_lstms : ``bool``, optional (default=True)
If ``False``, we will skip passing the mask to the LSTM layers. This gives a ~2x speedup,
with only a slight performance decrease, if any. We haven't experimented much with this
yet, but have confirmed that we still get very similar performance with much faster
training times. We still use the mask for all softmaxes, but avoid the shuffling that's
required when using masking with pytorch LSTMs.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
attention_similarity_function: SimilarityFunction,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(BidirectionalAttentionFlow, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(Highway(text_field_embedder.get_output_dim(),
num_highway_layers))
self._phrase_layer = phrase_layer
self._matrix_attention = MatrixAttention(attention_similarity_function)
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these
# aren't necessarily obvious from the configuration files, so we check
# here.
if modeling_layer.get_input_dim() != 4 * encoding_dim:
raise ConfigurationError("The input dimension to the modeling_layer must be "
"equal to 4 times the encoding dimension of the phrase_layer. "
"Found {} and 4 * {} respectively.".format(modeling_layer.get_input_dim(),
encoding_dim))
if text_field_embedder.get_output_dim() != phrase_layer.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder (embedding_dim + "
"char_cnn) must match the input dimension of the phrase_encoder. "
"Found {} and {}, respectively.".format(text_field_embedder.get_output_dim(),
phrase_layer.get_input_dim()))
if span_end_encoder.get_input_dim() != encoding_dim * 4 + modeling_dim * 3:
raise ConfigurationError("The input dimension of the span_end_encoder should be equal to "
"4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim. "
"Found {} and (4 * {} + 3 * {}) "
"respectively.".format(span_end_encoder.get_input_dim(),
encoding_dim,
modeling_dim))
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
beginning position of the answer with the passage. This is an `inclusive` index. If
this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
ending position of the answer with the passage. This is an `inclusive` index. If
this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalised log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalised log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.last_dim_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(passage_question_similarity,
question_mask.unsqueeze(1),
-1e7)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(batch_size,
passage_length,
encoding_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat([encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector],
dim=-1)
modeled_passage = self._dropout(self._modeling_layer(final_merged_passage, passage_lstm_mask))
modeling_dim = modeled_passage.size(-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim))
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
# Shape: (batch_size, modeling_dim)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(batch_size,
passage_length,
modeling_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim * 3)
span_end_representation = torch.cat([final_merged_passage,
modeled_passage,
tiled_start_representation,
modeled_passage * tiled_start_representation],
dim=-1)
# Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end = self._dropout(self._span_end_encoder(span_end_representation,
passage_lstm_mask))
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=-1))
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_end_probs = util.masked_softmax(span_end_logits, passage_mask)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e7)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e7)
best_span = self._get_best_span(span_start_logits, span_end_logits)
output_dict = {"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span}
if span_start is not None:
loss = nll_loss(util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1))
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1))
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.stack([span_start, span_end], -1))
output_dict["loss"] = loss
if metadata is not None:
output_dict['best_span_str'] = []
for i in range(batch_size):
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['token_offsets']
predicted_span = tuple(best_span[i].data.cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict['best_span_str'].append(best_span_string)
answer_texts = metadata[i].get('answer_texts', [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {
'start_acc': self._span_start_accuracy.get_metric(reset),
'end_acc': self._span_end_accuracy.get_metric(reset),
'span_acc': self._span_accuracy.get_metric(reset),
'em': exact_match,
'f1': f1_score,
}
@staticmethod
def _get_best_span(span_start_logits: Variable, span_end_logits: Variable) -> Variable:
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [-1e20] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = Variable(span_start_logits.data.new()
.resize_(batch_size, 2).fill_(0)).long()
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
for b in range(batch_size): # pylint: disable=invalid-name
for j in range(passage_length):
val1 = span_start_logits[b, span_start_argmax[b]]
if val1 < span_start_logits[b, j]:
span_start_argmax[b] = j
val1 = span_start_logits[b, j]
val2 = span_end_logits[b, j]
if val1 + val2 > max_span_log_prob[b]:
best_word_span[b, 0] = span_start_argmax[b]
best_word_span[b, 1] = j
max_span_log_prob[b] = val1 + val2
return best_word_span
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
num_highway_layers = params.pop("num_highway_layers")
phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
dropout = params.pop('dropout', 0.2)
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
mask_lstms = params.pop('mask_lstms', True)
params.assert_empty(cls.__name__)
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
num_highway_layers=num_highway_layers,
phrase_layer=phrase_layer,
attention_similarity_function=similarity_function,
modeling_layer=modeling_layer,
span_end_encoder=span_end_encoder,
dropout=dropout,
mask_lstms=mask_lstms,
initializer=initializer,
regularizer=regularizer)
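# A minimal configuration sketch (assumed, not from the original source) showing
# the shape of the Params dict that from_params consumes; the sub-configs follow
# the usual AllenNLP experiment-file layout and are abbreviated here:
#
#   params = Params({
#       "text_field_embedder": {...},
#       "num_highway_layers": 2,
#       "phrase_layer": {"type": "lstm", "bidirectional": True, ...},
#       "similarity_function": {"type": "linear", ...},
#       "modeling_layer": {"type": "lstm", ...},
#       "span_end_encoder": {"type": "lstm", ...},
#       "dropout": 0.2
#   })
#   model = BidirectionalAttentionFlow.from_params(vocab, params)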
|
|
import unittest
import string
import logging
import json
from unittest import mock
from io import StringIO
from pulsar import send
from pulsar.utils.httpurl import encode_multipart_formdata
from pulsar.utils.string import random_string
from pulsar.apps.test import test_timeout
import lux
from lux.core.commands.generate_secret_key import generate_secret
logger = logging.getLogger('lux.test')
def randomname(prefix=None):
prefix = prefix or 'luxtest_'
name = random_string(min_len=8, max_len=8, characters=string.ascii_letters)
return ('%s%s' % (prefix, name)).lower()
def green(test_fun):
def _(o):
pool = o.app.green_pool
if pool:
return pool.submit(test_fun, o)
else:
return test_fun(o)
return _
def test_app(test, config_file=None, argv=None, **params):
'''Return an application for testing. Override if needed.
'''
kwargs = test.config_params.copy()
kwargs.update(params)
if 'EMAIL_BACKEND' not in kwargs:
kwargs['EMAIL_BACKEND'] = 'lux.core.mail.LocalMemory'
if 'SECRET_KEY' not in kwargs:
kwargs['SECRET_KEY'] = generate_secret()
config_file = config_file or test.config_file
if argv is None:
argv = []
if '--log-level' not in argv:
argv.append('--log-level')
levels = test.cfg.loglevel if hasattr(test, 'cfg') else ['none']
argv.extend(levels)
app = lux.App(config_file, argv=argv, **kwargs).setup()
#
# Data mapper
app.stdout = StringIO()
app.stderr = StringIO()
return app
class testClient:
def __init__(self, app):
self.app = app
def run_command(self, command, argv=None, **kwargs):
argv = argv or []
cmd = self.app.get_command(command)
return cmd(argv, **kwargs)
def request_start_response(self, path=None, HTTP_ACCEPT=None,
headers=None, body=None, content_type=None,
**extra):
extra['HTTP_ACCEPT'] = HTTP_ACCEPT or '*/*'
if content_type:
headers = headers or []
headers.append(('content-type', content_type))
request = self.app.wsgi_request(path=path, headers=headers, body=body,
extra=extra)
start_response = mock.MagicMock()
return request, start_response
def request(self, **params):
request, sr = self.request_start_response(**params)
response = self.app(request.environ, sr)
return request
def get(self, path=None, **extra):
extra['REQUEST_METHOD'] = 'GET'
return self.request(path=path, **extra)
def post(self, path=None, body=None, content_type=None, **extra):
extra['REQUEST_METHOD'] = 'POST'
if body and not isinstance(body, bytes):
if content_type is None:
body, content_type = encode_multipart_formdata(body)
elif content_type == 'application/json':
body = json.dumps(body).encode('utf-8')
return self.request(path=path, content_type=content_type,
body=body, **extra)
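# A minimal usage sketch (assumed): drive a lux application through WSGI
# without starting a real HTTP server.
#
#   app = test_app(some_test_case)
#   client = testClient(app)
#   request = client.get('/')
#   request = client.post('/login', body={'user': 'pippo'},
#                         content_type='application/json')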
class TestMixin:
config_file = 'tests.config'
'''The config file to use when building an :meth:`application`'''
config_params = {}
'''Dictionary of parameters to override the parameters from
:attr:`config_file`'''
prefixdb = 'luxtest_'
def bs(self, response):
from bs4 import BeautifulSoup
self.assertEqual(response.headers['content-type'],
'text/html; charset=utf-8')
return BeautifulSoup(response.get_content())
def authenticity_token(self, doc):
name = doc.find('meta', attrs={'name': 'csrf-param'})
value = doc.find('meta', attrs={'name': 'csrf-token'})
if name and value:
name = name.attrs['content']
value = value.attrs['content']
return {name: value}
class TestCase(unittest.TestCase, TestMixin):
'''TestCase class for lux tests.
    It provides several utility methods.
'''
apps = None
def application(self, **params):
'''Return an application for testing. Override if needed.
'''
app = test_app(self, **params)
if self.apps is None:
self.apps = []
self.apps.append(app)
return app
def request_start_response(self, app, path=None, HTTP_ACCEPT=None,
headers=None, body=None, **extra):
extra['HTTP_ACCEPT'] = HTTP_ACCEPT or '*/*'
request = app.wsgi_request(path=path, headers=headers, body=body,
extra=extra)
start_response = mock.MagicMock()
return request, start_response
def request(self, app=None, **params):
if not app:
app = self.application()
request, sr = self.request_start_response(app, **params)
response = app(request.environ, sr)
self.assertEqual(response, request.response)
return request
def run_command(self, app, *args, **kwargs):
if not args:
command = app
app = self.application()
else:
command = args[0]
argv = args[1] if len(args) == 2 else []
cmd = app.get_command(command)
self.assertTrue(cmd.logger)
self.assertEqual(cmd.name, command)
return cmd(argv, **kwargs)
def fetch_command(self, command, out=None):
'''Fetch a command.'''
app = self.application()
cmd = app.get_command(command)
self.assertTrue(cmd.logger)
self.assertEqual(cmd.name, command)
return cmd
def post(self, app=None, path=None, content_type=None, body=None,
headers=None, **extra):
extra['REQUEST_METHOD'] = 'POST'
headers = headers or []
if body and not isinstance(body, bytes):
if content_type is None:
body, content_type = encode_multipart_formdata(body)
if content_type:
headers.append(('content-type', content_type))
return self.request(app, path=path, headers=headers,
body=body, **extra)
def database_drop(self):
if self.apps:
for app in self.apps:
if hasattr(app, 'mapper'):
from lux.extensions.odm import database_drop
yield from database_drop(app)
def tearDown(self):
return self.database_drop()
class AppTestCase(unittest.TestCase, TestMixin):
    '''Test class for testing applications
'''
odm = None
app = None
@classmethod
def setUpClass(cls):
# Create the application
cls.dbs = {}
cls.app = test_app(cls)
cls.client = testClient(cls.app)
if hasattr(cls.app, 'odm'):
cls.odm = cls.app.odm
return cls.setupdb()
@classmethod
@green
def tearDownClass(cls):
if cls.odm:
cls.app.odm().close()
cls.odm().database_drop(database=cls.dbname)
@classmethod
def dbname(cls, engine):
if engine not in cls.dbs:
cls.dbs[engine] = randomname(cls.prefixdb)
return cls.dbs[engine]
@classmethod
@green
def setupdb(cls):
logger.info('Create test databases')
cls.app.odm = cls.odm.database_create(database=cls.dbname)
logger.info('Create test tables')
cls.app.odm().table_create()
class TestServer(unittest.TestCase, TestMixin):
app_cfg = None
@test_timeout(30)
@classmethod
def setUpClass(cls):
name = cls.__name__.lower()
cfg = cls.cfg
argv = [__file__, 'serve', '-b', '127.0.0.1:0',
'--concurrency', cfg.concurrency]
loglevel = cfg.loglevel
cls.app = app = lux.execute_from_config(cls.config_file, argv=argv,
name=name, loglevel=loglevel)
mapper = cls.on_loaded(app)
if mapper:
app.params['DATASTORE'] = mapper._default_store.dns
yield from app.get_command('create_databases')([])
yield from app.get_command('create_tables')([])
cls.app_cfg = yield from app._started
cls.url = 'http://{0}:{1}'.format(*cls.app_cfg.addresses[0])
@classmethod
def tearDownClass(cls):
from lux.extensions.odm import database_drop
if cls.app_cfg is not None:
yield from send('arbiter', 'kill_actor', cls.app_cfg.name)
yield from database_drop(cls.app)
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import six
from docker.utils import split_command
from docker.utils.ports import split_port
from .cli.errors import UserError
from .config.serialize import denormalize_config
from .network import get_network_defs_for_service
from .service import format_environment
from .service import NoSuchImageError
from .service import parse_repository_tag
log = logging.getLogger(__name__)
SERVICE_KEYS = {
'working_dir': 'WorkingDir',
'user': 'User',
'labels': 'Labels',
}
IGNORED_KEYS = {'build'}
SUPPORTED_KEYS = {
'image',
'ports',
'expose',
'networks',
'command',
'environment',
'entrypoint',
} | set(SERVICE_KEYS)
VERSION = '0.1'
class NeedsPush(Exception):
def __init__(self, image_name):
self.image_name = image_name
class NeedsPull(Exception):
def __init__(self, image_name):
self.image_name = image_name
class MissingDigests(Exception):
def __init__(self, needs_push, needs_pull):
self.needs_push = needs_push
self.needs_pull = needs_pull
def serialize_bundle(config, image_digests):
return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
def get_image_digests(project, allow_push=False):
digests = {}
needs_push = set()
needs_pull = set()
for service in project.services:
try:
digests[service.name] = get_image_digest(
service,
allow_push=allow_push,
)
except NeedsPush as e:
needs_push.add(e.image_name)
except NeedsPull as e:
needs_pull.add(e.image_name)
if needs_push or needs_pull:
raise MissingDigests(needs_push, needs_pull)
return digests
def get_image_digest(service, allow_push=False):
if 'image' not in service.options:
raise UserError(
"Service '{s.name}' doesn't define an image tag. An image name is "
"required to generate a proper image digest for the bundle. Specify "
"an image repo and tag with the 'image' option.".format(s=service))
_, _, separator = parse_repository_tag(service.options['image'])
# Compose file already uses a digest, no lookup required
if separator == '@':
return service.options['image']
try:
image = service.image()
except NoSuchImageError:
action = 'build' if 'build' in service.options else 'pull'
raise UserError(
"Image not found for service '{service}'. "
"You might need to run `docker-compose {action} {service}`."
.format(service=service.name, action=action))
if image['RepoDigests']:
# TODO: pick a digest based on the image tag if there are multiple
# digests
return image['RepoDigests'][0]
if 'build' not in service.options:
raise NeedsPull(service.image_name)
if not allow_push:
raise NeedsPush(service.image_name)
return push_image(service)
def push_image(service):
try:
digest = service.push()
except:
log.error(
"Failed to push image for service '{s.name}'. Please use an "
"image tag that can be pushed to a Docker "
"registry.".format(s=service))
raise
if not digest:
raise ValueError("Failed to get digest for %s" % service.name)
repo, _, _ = parse_repository_tag(service.options['image'])
identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
# only do this if RepoDigests isn't already populated
image = service.image()
if not image['RepoDigests']:
# Pull by digest so that image['RepoDigests'] is populated for next time
# and we don't have to pull/push again
service.client.pull(identifier)
log.info("Stored digest for {}".format(service.image_name))
return identifier
def to_bundle(config, image_digests):
if config.networks:
log.warn("Unsupported top level key 'networks' - ignoring")
if config.volumes:
log.warn("Unsupported top level key 'volumes' - ignoring")
config = denormalize_config(config)
return {
'Version': VERSION,
'Services': {
name: convert_service_to_bundle(
name,
service_dict,
image_digests[name],
)
for name, service_dict in config['services'].items()
},
}
def convert_service_to_bundle(name, service_dict, image_digest):
container_config = {'Image': image_digest}
for key, value in service_dict.items():
if key in IGNORED_KEYS:
continue
if key not in SUPPORTED_KEYS:
log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
continue
if key == 'environment':
container_config['Env'] = format_environment({
envkey: envvalue for envkey, envvalue in value.items()
if envvalue
})
continue
if key in SERVICE_KEYS:
container_config[SERVICE_KEYS[key]] = value
continue
set_command_and_args(
container_config,
service_dict.get('entrypoint', []),
service_dict.get('command', []))
container_config['Networks'] = make_service_networks(name, service_dict)
ports = make_port_specs(service_dict)
if ports:
container_config['Ports'] = ports
return container_config
# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
if isinstance(command, six.string_types):
command = split_command(command)
if entrypoint:
config['Command'] = entrypoint + command
return
if command:
config['Args'] = command
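# Example mapping (illustrative): entrypoint "python app.py" with command
# "--debug" yields {'Command': ['python', 'app.py', '--debug']}; with no
# entrypoint, the command alone becomes {'Args': ['--debug']}.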
def make_service_networks(name, service_dict):
networks = []
for network_name, network_def in get_network_defs_for_service(service_dict).items():
for key in network_def.keys():
log.warn(
"Unsupported key '{}' in services.{}.networks.{} - ignoring"
.format(key, name, network_name))
networks.append(network_name)
return networks
def make_port_specs(service_dict):
ports = []
internal_ports = [
internal_port
for port_def in service_dict.get('ports', [])
for internal_port in split_port(port_def)[0]
]
internal_ports += service_dict.get('expose', [])
for internal_port in internal_ports:
spec = make_port_spec(internal_port)
if spec not in ports:
ports.append(spec)
return ports
def make_port_spec(value):
components = six.text_type(value).partition('/')
return {
'Protocol': components[2] or 'tcp',
'Port': int(components[0]),
}
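# Example (illustrative): make_port_spec('8080/udp') returns
# {'Protocol': 'udp', 'Port': 8080}, while make_port_spec(80) defaults to
# {'Protocol': 'tcp', 'Port': 80}.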
|
|
# Fuzzy stuff for text summarizer
# Authors: Us
# Copyright: Ask Mihaela
import rules as rl
import numpy
mem_funcs = {}
mem_funcs['keyword'] = {
    'VL': {'start': -1,   'peak': 0,    'end': 0.25},
    'L':  {'start': 0,    'peak': 0.25, 'end': 0.50},
    'M':  {'start': 0.25, 'peak': 0.50, 'end': 0.75},
    'H':  {'start': 0.50, 'peak': 0.75, 'end': 1.00},
    'VH': {'start': 0.75, 'peak': 1.00, 'end': 2.00}}
mem_funcs['title_word'] = {
    'L': {'start': -1,   'peak': 0,    'end': 0.25},
    'M': {'start': 0,    'peak': 0.25, 'end': 1.00},
    'H': {'start': 0.25, 'peak': 1.00, 'end': 2.00}}
mem_funcs['sentence_location'] = {
    'L': {'start': -1, 'peak': 0, 'end': 0.7},
    'H': {'start': 0,  'peak': 1, 'end': 2}}
mem_funcs['sentence_length'] = {
    'VL': {'start': -1,   'peak': 0,    'end': 0.25},
    'L':  {'start': 0,    'peak': 0.25, 'end': 0.50},
    'M':  {'start': 0.25, 'peak': 0.50, 'end': 0.75},
    'H':  {'start': 0.50, 'peak': 0.75, 'end': 1.00},
    'VH': {'start': 0.75, 'peak': 1.00, 'end': 2.00}}
mem_funcs['proper_noun'] = {
    'L': {'start': -1,   'peak': 0,    'end': 0.50},
    'M': {'start': 0,    'peak': 0.50, 'end': 1.00},
    'H': {'start': 0.50, 'peak': 1.00, 'end': 2.00}}
mem_funcs['cue_phrase'] = {
    'L': {'start': -1,   'peak': 0,    'end': 0.10},
    'M': {'start': 0,    'peak': 0.10, 'end': 1.00},
    'H': {'start': 0.10, 'peak': 1.00, 'end': 2.00}}
mem_funcs['nonessential'] = {
    'L': {'start': -1,   'peak': 0,    'end': 0.05},
    'M': {'start': 0,    'peak': 0.05, 'end': 1.00},
    'H': {'start': 0.05, 'peak': 1.00, 'end': 2.00}}
mem_funcs['numerical_data'] = {
    'L': {'start': -1,   'peak': 0,    'end': 0.50},
    'M': {'start': 0,    'peak': 0.50, 'end': 1.00},
    'H': {'start': 0.50, 'peak': 1.00, 'end': 2.00}}
output_funcs = {
    'L': {'start': -0.5, 'peak': 0,    'end': 0.50},
    'M': {'start': 0,    'peak': 0.50, 'end': 1.00},
    'I': {'start': 0.50, 'peak': 1.00, 'end': 1.50}}
def get_line(zero, peak):
k = 1/(peak-zero)
n = -k * zero
return {'k': k, 'n' : n}
def fuzzify_feature(val, feature):
ret_val = {}
for key in mem_funcs[feature]:
func = mem_funcs[feature][key]
if val < func['start'] or val > func['end']:
res = 0
else:
if val < func['peak']:
line = get_line(func['start'], func['peak'])
else:
line = get_line(func['end'], func['peak'])
            res = line['k'] * val + line['n']
ret_val[key] = res
return ret_val
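# Illustrative check (an assumption about usage, not original code): each entry
# in mem_funcs defines a triangular membership function that rises linearly
# from 'start' to 1.0 at 'peak' and falls back to 0 at 'end'. For example,
# fuzzify_feature(0.3, 'keyword') should yield approximately
#   {'VL': 0.0, 'L': 0.8, 'M': 0.2, 'H': 0.0, 'VH': 0.0}.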
def fuzzify_sentence(s):
ret_val = {}
for feature in s:
ret_val[feature] = fuzzify_feature(s[feature], feature)
return ret_val
def fuzzify_sentences(sentences):
fuzzified = []
for sentence in sentences:
fuzzified.append(fuzzify_sentence(sentence))
return fuzzified
def print_line(line):
print("(k, n) = (" + str(line['k']) + ", " + str(line['n']) + ")")
def print_info(info):
for sentence in info:
print("*******************");
for feature in sentence:
print(feature + ": " + str(sentence[feature]));
def get_max_rules(sentence):
max_rules = {'I' : 0, 'M' : 0, 'L' : 0}
fuzzified_sentence = fuzzify_sentence(sentence)
rule_results = rl.calculate_all_rules(fuzzified_sentence)
for rule_key in rule_results:
if max_rules[rule_key[0]] < rule_results[rule_key]:
max_rules[rule_key[0]] = rule_results[rule_key]
return max_rules
def get_output_function_val(key, x):
ofun = output_funcs[key]
if x < ofun['start'] or x > ofun['end']:
return 0
else:
if x < ofun['peak']:
line = get_line(ofun['start'], ofun['peak'])
else:
line = get_line(ofun['end'], ofun['peak'])
        return line['k'] * x + line['n']
def get_output_val(x, key, maximum):
return min(maximum, get_output_function_val(key,x))
def get_aggregated_value(x, max_rules):
output_vals = []
for key in max_rules:
output_vals.append(get_output_val(x, key, max_rules[key]))
return max(output_vals)
def center_of_gravity(max_rules):
dx = 0.01
    y_vals = []
    integration_start = -0.4
    integration_end = 1.4
    x_vals = list(numpy.arange(integration_start, integration_end, dx))
for x in x_vals:
y_vals.append(get_aggregated_value(x, max_rules))
summ = 0
for i in range(0, len(y_vals)):
summ += y_vals[i] * x_vals[i]
return summ/sum(y_vals)
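# Note: center_of_gravity() is the standard centroid defuzzification step; it
# approximates x_bar = sum(y_i * x_i) / sum(y_i) over the aggregated output
# membership curve. The constant dx cancels out of the ratio, so it is omitted.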
def get_fuzzy_rank(sentence):
max_rules = get_max_rules(sentence)
return center_of_gravity(max_rules)
def print_everything(almost_originals, sentences):
rank_results = get_fuzzy_ranks(sentences)
    paired = zip(almost_originals, rank_results)
    rank_sort_results = sorted(paired, key=lambda x: x[1][1], reverse=True)
    for rank_idx, ranked_element in enumerate(rank_sort_results):
sentence = ranked_element[1][0]
print("******************************")
print(ranked_element[0].original)
print("\nFeatures:")
fuzzified = fuzzify_sentence(sentence)
for key in fuzzified:
print("\t" + "%20s" % key + ": ", end = "")
for key2 in fuzzified[key]:
print(" %2s: " % key2 + "%.2f" % fuzzified[key][key2], end = "")
print("")
print("\nRules:")
rl.print_rules_results(fuzzify_sentence(sentence))
print("\nFinal value: " + "%.3f" % ranked_element[1][1])
print("Rank: (%d / %d)" % (1 + rank_sort_results.index(ranked_element),
len(rank_sort_results)))
print("")
def set_fuzzy_ranks(sentence_objects, sentences):
    for (sen_obj, sentence) in zip(sentence_objects, sentences):
        sen_obj.rank = get_fuzzy_rank(sentence)
def get_fuzzy_ranks(sentences):
ret_val = []
for sentence in sentences:
ret_val.append((sentence, get_fuzzy_rank(sentence)))
return ret_val
# MAIN:
# test_stuff()
# sentence1 = {'keyword' : 0.1, 'title_word' : 0.2,
# 'sentence_location': 0.3, 'sentence_length' : 0.4,
# 'proper_noun' : 0.5, 'cue_phrase' : 0.6,
# 'nonessential' : 0.7, 'numerical_data' : 0.8}
# sentence2 = {'keyword' : 0.8, 'title_word' : 0.7,
# 'sentence_location': 0.6, 'sentence_length' : 0.5,
# 'proper_noun' : 0.4, 'cue_phrase' : 0.3,
# 'nonessential' : 0.2, 'numerical_data' : 0.1}
# sentences = [sentence1, sentence2]
# output = fuzzify_sentences(sentences)
# print_info(output)
|
|
#!/usr/bin/env python
"""
Weboob main Python wrapper
This file is a wrapper around Weboob, which is spawned by the Kresus backend
and prints fetched data as a JSON export on stdout, so that it can easily be
imported in Kresus' NodeJS backend.
..note:: Useful environment variables are
- ``WEBOOB_DIR`` to specify the path to the root Weboob folder (with
modules and Weboob code)
- ``KRESUS_DIR`` to specify the path to Kresus data dir.
- ``WEBOOB_SOURCES_LIST`` to specify a Weboob sources.list to use instead
of the default one.
Commands are read on standard input. Available commands are:
* ``version`` to get the Weboob version.
* ``test`` to test Weboob is installed and a working connector can be
built.
* ``update`` to update Weboob modules.
* ``accounts BANK LOGIN PASSWORD EXTRA_CONFIG`` to get accounts from bank
``BANK`` using the provided credentials and the given extra
configuration options for the Weboob module (passed as a JSON string).
* ``operations BANK LOGIN PASSWORD EXTRA_CONFIG`` to get a list of
operations from bank ``BANK`` using the provided credentials and given
extra configuration options.
"""
from __future__ import print_function, unicode_literals
import collections
import gc
import json
import logging
import os
import shlex
import shutil
import sys
import traceback
from datetime import datetime
def error(error_code, error_short, error_long):
"""
Log error, return error JSON on stdin and exit with non-zero error code.
:param error_code: Kresus-specific error code. See ``shared/errors.json``.
:param error_content: Error string.
"""
error_object = {
'error_code': error_code,
'error_short': error_short,
'error_message': "%s\n%s" % (error_short, error_long)
}
print(json.dumps(error_object))
sys.exit(1)
# Load errors description
ERRORS_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), # This script directory
'..', 'shared', 'errors.json'
)
with open(ERRORS_PATH, 'r') as f:
ERRORS = json.load(f)
ACTION_NEEDED = ERRORS['ACTION_NEEDED']
UNKNOWN_MODULE = ERRORS['UNKNOWN_WEBOOB_MODULE']
INVALID_PASSWORD = ERRORS['INVALID_PASSWORD']
EXPIRED_PASSWORD = ERRORS['EXPIRED_PASSWORD']
GENERIC_EXCEPTION = ERRORS['GENERIC_EXCEPTION']
INVALID_PARAMETERS = ERRORS['INVALID_PARAMETERS']
NO_ACCOUNTS = ERRORS['NO_ACCOUNTS']
WEBOOB_NOT_INSTALLED = ERRORS['WEBOOB_NOT_INSTALLED']
INTERNAL_ERROR = ERRORS['INTERNAL_ERROR']
# Import Weboob core
if 'WEBOOB_DIR' in os.environ and os.path.isdir(os.environ['WEBOOB_DIR']):
sys.path.append(os.environ['WEBOOB_DIR'])
try:
from weboob.capabilities.base import empty
from weboob.core import Weboob
from weboob.exceptions import (
ActionNeeded,
BrowserIncorrectPassword,
BrowserPasswordExpired,
NoAccountsException,
ModuleInstallError,
ModuleLoadError
)
from weboob.tools.backend import Module
from weboob.tools.compat import unicode
from weboob.tools.log import createColoredFormatter
except ImportError as exc:
error(
WEBOOB_NOT_INSTALLED,
('Is weboob correctly installed? Unknown exception raised: %s.' %
unicode(exc)),
traceback.format_exc()
)
def init_logging(level=logging.WARNING):
"""
Initialize loggers.
:param level: Minimal severity to log.
"""
root_logger = logging.getLogger()
root_logger.setLevel(level)
handler = logging.StreamHandler(sys.stderr)
fmt = '%(asctime)s:%(levelname)s:%(name)s:%(filename)s:%(lineno)d:%(funcName)s %(message)s'
if os.environ.get('NODE_ENV', 'production') != 'production':
# Only output colored logging if not running in production.
handler.setFormatter(createColoredFormatter(sys.stderr, fmt))
else:
handler.setFormatter(logging.Formatter(fmt))
root_logger.addHandler(handler)
class DummyProgress(object):
"""
Dummy progressbar, to hide it when installing the module.
.. note:: Taken from Weboob code.
"""
def progress(self, *args, **kwargs):
pass
def prompt(self, message):
logging.info(message)
return True
class Connector(object):
"""
    Connector is a tool that connects to common websites, such as bank or
    phone operator websites, and grabs personal data from there. Credentials
    are required to perform this operation.
    Technically, connectors are Weboob backend wrappers.
"""
@staticmethod
def version():
"""
Get the version of the installed Weboob.
"""
return Weboob.VERSION
def __init__(self, weboob_data_path):
"""
Create a Weboob instance.
:param weboob_data_path: Weboob path to use.
"""
if not os.path.isdir(weboob_data_path):
os.makedirs(weboob_data_path)
# Set weboob data directory and sources.list file.
self.weboob_data_path = weboob_data_path
self.write_weboob_sources_list()
# Create a Weboob object.
self.weboob = Weboob(workdir=weboob_data_path,
datadir=weboob_data_path)
self.backends = collections.defaultdict(dict)
# Force Weboob update, to ensure the new sources.list is taken into
# account.
self.update()
def write_weboob_sources_list(self):
"""
Ensure the Weboob sources.list file contains the required entries from
Kresus.
"""
sources_list_path = os.path.join(
self.weboob_data_path, 'sources.list'
)
if (
'WEBOOB_SOURCES_LIST' in os.environ and
os.path.isfile(os.environ['WEBOOB_SOURCES_LIST'])
):
# Copy specified sources list file to Weboob data directory.
shutil.copyfile(
os.environ['WEBOOB_SOURCES_LIST'],
sources_list_path
)
else:
# Here is the list of mandatory lines in the sources.list file, as
# required by Kresus.
sources_list_lines = [
'https://updates.weboob.org/%(version)s/main/',
(
'file://%s/fakemodules/' % (
os.path.dirname(os.path.abspath(__file__))
)
)
]
# Get sources.list lines.
with open(sources_list_path, 'w') as sources_list_file:
sources_list_file.write('\n'.join(sources_list_lines))
def update(self):
"""
Update Weboob modules.
"""
        # Weboob has an offending print statement when it "Rebuilds index",
        # which happens at every run if the user has a local repository. We
        # need to silence it, hence the temporary redirect of stdout.
sys.stdout = open(os.devnull, "w")
try:
self.weboob.update(progress=DummyProgress())
        except Exception:
# Try to remove the data directory, to see if it changes a thing.
# This is especially useful when a new version of Weboob is
# published and/or the keyring changes.
shutil.rmtree(self.weboob_data_path)
os.makedirs(self.weboob_data_path)
# Recreate the Weboob object as the directories are created
# on creating the Weboob object.
self.weboob = Weboob(workdir=self.weboob_data_path,
datadir=self.weboob_data_path)
# Rewrite sources.list file
self.write_weboob_sources_list()
# Retry update
self.weboob.update(progress=DummyProgress())
finally:
# Restore stdout
sys.stdout = sys.__stdout__
def create_backend(self, modulename, parameters):
"""
Create a Weboob backend for a given module, ready to be used to fetch
data.
:param modulename: The name of the module from which backend should be
created.
:param parameters: A dict of parameters to pass to the module. It
should at least contain ``login`` and ``password`` fields, but can
contain additional values depending on the module.
"""
# Install the module if required.
repositories = self.weboob.repositories
minfo = repositories.get_module_info(modulename)
if (
minfo is not None and not minfo.is_installed() and
not minfo.is_local()
):
# We cannot install a locally available module, this would
# result in a ModuleInstallError.
try:
repositories.install(minfo, progress=DummyProgress())
            except ModuleInstallError:
                error(
                    GENERIC_EXCEPTION,
                    "Unable to install module %s." % modulename,
traceback.format_exc()
)
# Initialize the backend.
login = parameters['login']
self.backends[modulename][login] = self.weboob.build_backend(
modulename,
parameters
)
def delete_backend(self, modulename, login=None):
"""
Delete a created backend for the given module.
:param modulename: The name of the module from which backend should be
deleted.
:param login: An optional login to delete only a specific backend.
Otherwise delete all the backends from the given module name.
"""
def _deinit_backend(backend):
"""
Deinitialize a given Weboob loaded backend object.
"""
# This code comes directly from Weboob core code. As we are
# building backends on our side, we are responsible for
# deinitialization.
with backend:
backend.deinit()
try:
# Deinit matching backend objects and remove them from loaded
# backends dict.
if login:
_deinit_backend(self.backends[modulename][login])
del self.backends[modulename][login]
else:
                for backend in self.backends[modulename].values():
                    _deinit_backend(backend)
del self.backends[modulename]
gc.collect() # Force GC collection, better than nothing.
except KeyError:
logging.warn(
'No matching backends for module %s and login %s.',
modulename, login
)
def get_all_backends(self):
"""
Get all the available built backends.
:returns: A list of backends.
"""
backends = []
for modules_backends in self.backends.values():
backends.extend(modules_backends.values())
return backends
def get_bank_backends(self, modulename):
"""
Get all the built backends for a given bank module.
:param modulename: The name of the module from which the backend should
be created.
:returns: A list of backends.
"""
if modulename in self.backends:
return self.backends[modulename].values()
else:
logging.warn(
'No matching built backends for bank module %s.',
modulename
)
return []
def get_backend(self, modulename, login):
"""
Get a specific backend associated to a specific login with a specific
bank module.
:param modulename: The name of the module from which the backend should
be created.
:param login: The login to further filter on the available backends.
:returns: A list of backends (with a single item).
"""
if not modulename:
# Module name is mandatory in this case.
logging.error('Missing bank module name.')
return []
if modulename in self.backends and login in self.backends[modulename]:
return [self.backends[modulename][login]]
else:
logging.warn(
'No matching built backends for bank module %s with login %s.',
modulename, login
)
return []
def get_backends(self, modulename=None, login=None):
"""
Get a list of backends matching criterions.
:param modulename: The name of the module from which the backend should
be created.
:param login: The login to further filter on the available backends. If
passed, ``modulename`` cannot be empty.
:returns: A list of backends.
"""
if login:
# If login is provided, only return backends matching the
# module name and login (at most one).
return self.get_backend(modulename, login)
elif modulename:
# If only modulename is provided, returns all matching
# backends.
return self.get_bank_backends(modulename)
else:
# Just return all available backends.
return self.get_all_backends()
@staticmethod
def get_accounts(backend):
"""
Fetch accounts data from Weboob.
:param backend: The Weboob built backend to fetch data from.
:returns: A list of dicts representing the available accounts.
"""
results = []
for account in backend.iter_accounts():
iban = None
if not empty(account.iban):
iban = account.iban
currency = None
if not empty(account.currency):
currency = unicode(account.currency)
results.append({
'accountNumber': account.id,
'label': account.label,
'balance': unicode(account.balance),
'iban': iban,
'currency': currency
})
return results
@staticmethod
def get_operations(backend):
"""
Fetch operations data from Weboob.
:param backend: The Weboob built backend to fetch data from.
:returns: A list of dicts representing the available operations.
"""
results = []
for account in list(backend.iter_accounts()):
# Get operations for all accounts available.
try:
history = backend.iter_history(account)
# Build an operation dict for each operation.
for line in history:
# Handle date
if line.rdate:
# Use date of the payment (real date) if available.
date = line.rdate
elif line.date:
# Otherwise, use debit date, on the bank statement.
date = line.date
else:
logging.error(
'No known date property in operation line: %s.',
unicode(line.raw)
)
date = datetime.now()
if line.label:
title = unicode(line.label)
else:
title = unicode(line.raw)
isodate = date.isoformat()
debit_date = line.date.isoformat()
results.append({
'account': account.id,
'amount': unicode(line.amount),
'raw': unicode(line.raw),
'type': line.type,
'date': isodate,
'debit_date': debit_date,
'title': title
})
except NotImplementedError:
# Weboob raises a NotImplementedError upon iteration, not upon
# method call. Hence, this exception should wrap the whole
# iteration.
logging.error(
('This account type has not been implemented by '
'weboob: %s.'),
account.id
)
return results
def fetch(self, which, modulename=None, login=None):
"""
Wrapper to fetch data from the Weboob connector.
This wrapper fetches the required data from Weboob and returns it. It
handles the translation between Weboob exceptions and Kresus error
codes stored in the JSON response.
:param which: The type of data to fetch. Can be either ``accounts`` or
``operations``.
:param modulename: The name of the module from which data should be
fetched. Optional, if not provided all available backends are used.
:param login: The login to further filter on the available backends.
Optional, if not provided all matching backends are used.
        :returns: A dict of the fetched data, under a ``values`` key. Errors
        are described under ``error_code``, ``error_short`` and
        ``error_content`` keys.
"""
results = {}
try:
results['values'] = []
backends = self.get_backends(modulename, login)
if which == 'accounts':
fetch_function = self.get_accounts
elif which == 'operations':
fetch_function = self.get_operations
else:
raise Exception('Invalid fetch command.')
for backend in backends:
with backend: # Acquire lock on backend
results['values'].extend(fetch_function(backend))
except ActionNeeded as exc:
results['error_code'] = ACTION_NEEDED
results['error_content'] = unicode(exc)
except NoAccountsException:
results['error_code'] = NO_ACCOUNTS
except ModuleLoadError:
results['error_code'] = UNKNOWN_MODULE
except BrowserPasswordExpired:
results['error_code'] = EXPIRED_PASSWORD
except BrowserIncorrectPassword:
# This `except` clause is not in alphabetic order and cannot be.
# This is due to the fact that BrowserPasswordExpired inherits from
# BrowserIncorrectPassword in Weboob 1.3.
results['error_code'] = INVALID_PASSWORD
except Module.ConfigError as exc:
results['error_code'] = INVALID_PARAMETERS
results['error_content'] = unicode(exc)
except Exception as exc:
error(
GENERIC_EXCEPTION,
                'Unknown error: %s.' % unicode(exc),
traceback.format_exc()
)
return results
if __name__ == '__main__':
# Parse command from standard input.
stdin = shlex.split(sys.stdin.readline()) # Split according to shell rules
command, other_args = stdin[0], stdin[1:]
# Handle logging
if '--debug' in other_args:
init_logging(logging.DEBUG)
# Strip it from other args, to handle this list in a uniform way
        # whether we are in debug mode or not.
del other_args[other_args.index('--debug')]
else:
init_logging()
# Build a Weboob connector.
try:
weboob_connector = Connector(
weboob_data_path=os.path.join(
os.environ.get('KRESUS_DIR', '.'),
'weboob-data'
)
)
except Exception as exc:
error(
WEBOOB_NOT_INSTALLED,
('Is weboob installed? Unknown exception raised: %s.' %
unicode(exc)),
traceback.format_exc()
)
# Handle the command and output the expected result on standard output, as
# JSON encoded string.
if command == 'test':
# Do nothing, just check we arrived so far.
print(json.dumps({}))
elif command == 'version':
# Return Weboob version.
obj = {
'values': weboob_connector.version()
}
print(json.dumps(obj))
elif command == 'update':
# Update Weboob modules.
try:
weboob_connector.update()
print(json.dumps({}))
except Exception as exc:
error(
GENERIC_EXCEPTION,
'Exception when updating weboob: %s.' % unicode(exc),
traceback.format_exc()
)
elif command in ['accounts', 'operations']:
    # Fetch accounts or operations.
if len(other_args) < 3:
# Check all the arguments are passed.
error(
INTERNAL_ERROR,
'Missing arguments for %s command.' % command,
None
)
# Format parameters for the Weboob connector.
bank_module = other_args[0]
custom_fields = []
if len(other_args) > 3:
try:
custom_fields = json.loads(other_args[3])
except ValueError:
error(
INTERNAL_ERROR,
'Invalid JSON custom fields: %s.' % other_args[3],
None
)
params = {
'login': other_args[1],
'password': other_args[2],
}
for f in custom_fields:
params[f['name']] = f['value']
# Create a Weboob backend, fetch data and delete the module.
try:
weboob_connector.create_backend(bank_module, params)
except ModuleLoadError as exc:
error(
GENERIC_EXCEPTION,
"Unable to load module %s." % bank_module,
traceback.format_exc()
)
content = weboob_connector.fetch(command)
weboob_connector.delete_backend(bank_module, login=params['login'])
# Output the fetched data as JSON.
print(json.dumps(content))
else:
# Unknown commands, send an error.
error(
GENERIC_EXCEPTION,
"Unknown command '%s'." % command,
None
)
|
|
from entity import *
from euclid import Vector3, Quaternion
from math import sin, cos, atan2, fabs, degrees
import new
import ode
class CollisionMask:
team = 0x1
weapon = 0x2
turret = 0x4
fighter = 0x8
    ship = 0x10
all = 0xffffffff
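# Usage note (an assumption based on how ODE category/collide bits work):
# masks combine with bitwise OR, e.g. weapon | turret == 0x6, and two geoms
# are tested for collision when one geom's category bits intersect the other
# geom's collide bits.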
class _Physics:
def __init__(self):
self.Nodes = {}
self.world = ode.World()
self.world.setERP(0.2)
self.world.setCFM(0.0001)
self.world.setLinearDamping(0.1)
self.world.setAngularDamping(0.1)
self.space = ode.Space(1)
self.contactgroup = ode.JointGroup()
self.last_step = 0.0
World.add_handlers(self.on_update, self.on_add, self.on_remove)
class PhysicsNode:
def __init__(self, entity, world, space):
if not entity.has('position'):
entity.position = Vector3()
if not entity.has('rotation'):
entity.rotation = Quaternion()
if not entity.has('linear_velocity'):
entity.linear_velocity = Vector3()
if not entity.has('angular_velocity'):
entity.angular_velocity = Quaternion()
if not entity.has('throttle'):
entity.throttle = 0.0
if not entity.has('_yaw'):
entity._yaw = 0.0
if not entity.has('_pitch'):
entity._pitch = 0.0
if not entity.has('_roll'):
entity._roll = 0.0
if not entity.has('max_velocity'):
entity.max_velocity = 0.0
if not entity.has('min_acceleration'):
entity.min_acceleration = 0.0
if not entity.has('max_acceleration'):
entity.max_acceleration = 0.0
if not entity.has('turn_rate'):
entity.turn_rate = 0.0
if not entity.has('mass'):
entity.mass = 1.0
if not entity.has('extents'):
entity.extents = Vector3(1, 1, 1)
if not entity.has('category_mask'):
entity.category_mask = 0xffffffff
if not entity.has('collide_mask'):
entity.collide_mask = 0xffffffff
if not entity.has('remove_on_collide'):
entity.remove_on_collide = False
def yaw(self, dir):
self._yaw = dir
entity.yaw_left = new.instancemethod(lambda s: yaw(s,1), entity, Entity)
entity.yaw_right = new.instancemethod(lambda s: yaw(s, -1), entity, Entity)
def pitch(self, dir):
self._pitch = dir
entity.pitch_up = new.instancemethod(lambda s: pitch(s,-1), entity, Entity)
entity.pitch_down = new.instancemethod(lambda s: pitch(s, 1), entity, Entity)
def roll(self, dir):
self._roll = dir
entity.roll_left = new.instancemethod(lambda s: roll(s,-1), entity, Entity)
entity.roll_right = new.instancemethod(lambda s: roll(s, 1), entity, Entity)
def turn_towards(self, dir):
Physics.Nodes[self].body.addTorque(dir*entity.turn_rate)
entity.turn_towards = new.instancemethod(turn_towards, entity, Entity)
def set_pos(self, pos):
self.position = pos
entity.warp = new.instancemethod(set_pos, entity, Entity)
self.body = ode.Body(world)
self.body.entity = entity
M = ode.Mass()
M.setBoxTotal(entity.mass, *entity.extents)
self.body.setMass(M)
if entity.has('model'):
self.geom = ode.GeomTriMesh(entity.model.drawable.trimesh, space)
else:
self.geom = ode.GeomBox(space, entity.extents)
self.geom.setBody(self.body)
self.geom.setCategoryBits(entity.category_mask)
self.geom.setCollideBits(entity.collide_mask)
self.body.setPosition(entity.position)
self.body.setLinearVel(entity.linear_velocity)
angle, axis = entity.angular_velocity.get_angle_axis()
self.body.setAngularVel(axis*angle)
m = entity.rotation.get_matrix()
self.body.setRotation([m.a, m.b, m.c, m.e, m.f, m.g, m.i, m.j, m.k])
def update(self, entity):
entity.position = Vector3(*self.body.getPosition())
entity.rotation = Quaternion(*self.body.getQuaternion())
linear = self.body.getLinearVel()
entity.linear_velocity = Vector3(*linear)
        angular = Vector3(*self.body.getAngularVel())
entity.angular_velocity = Quaternion.new_rotate_axis(abs(angular), angular)
thrust = entity.min_acceleration + entity.throttle*(entity.max_acceleration - entity.min_acceleration)
self.body.addRelForce(Vector3(0, 0, 1)*thrust)
self.body.addRelTorque(Vector3(entity._pitch, entity._yaw, entity._roll)*entity.turn_rate)
entity._yaw, entity._pitch, entity._roll = 0, 0, 0
entity._thrust = 0
def on_add(self, entity):
if entity.has('physics') and entity.physics == True:
self.Nodes[entity] = self.PhysicsNode(entity, self.world, self.space)
def on_remove(self, entity):
if entity in self.Nodes:
del self.Nodes[entity]
def collision_callback(self, args, geom1, geom2):
# Check if the objects do collide
contacts = ode.collide(geom1, geom2)
if len(contacts):
b1, b2 = geom1.getBody(), geom2.getBody()
#print 'collision between', b1.entity.name, 'and', b2.entity.name, len(contacts), 'contacts'
# Create contact joints
world, contactgroup = args
for c in contacts:
c.setMu(1000.0)
#c.setBounce(0.9)
#c.setMode(ode.ContactBounce)
j = ode.ContactJoint(world, contactgroup, c)
j.attach(b1, b2)
if b1.entity.has('health') and b1.entity.health:
b1.entity.deal_damage(b2.entity)
if b2.entity.has('health') and b2.entity.health:
b2.entity.deal_damage(b1.entity)
def on_update(self, dt):
for k, v in self.Nodes.iteritems():
v.update(k)
step_size = 1.0/60.0
dt += self.last_step
while dt >= step_size:
self.space.collide((self.world, self.contactgroup), self.collision_callback)
self.world.step(step_size)
self.contactgroup.empty()
dt -= step_size
self.last_step = dt
Physics = _Physics()
del _Physics
class FixedJoint:
def __init__(self, entity1, entity2):
self.joint = ode.FixedJoint(Physics.world)
self.joint.attach(Physics.Nodes[entity1].body, Physics.Nodes[entity2].body)
self.joint.setFixed()
class BallJoint:
def __init__(self, entity1, entity2, anchor):
self.joint = ode.BallJoint(Physics.world)
self.joint.attach(Physics.Nodes[entity1].body, Physics.Nodes[entity2].body)
self.joint.setAnchor(anchor)
class UniversalJoint:
def __init__(self, entity1, entity2, anchor, axis1, axis2):
self.joint = ode.UniversalJoint(Physics.world)
self.joint.attach(Physics.Nodes[entity1].body, Physics.Nodes[entity2].body)
self.joint.setAnchor(anchor)
self.joint.setAxis1(axis1)
self.joint.setAxis2(axis2)
class Hinge2Joint:
def __init__(self, entity1, entity2, anchor, axis1, axis2, limitLo, limitHi):
self.joint = ode.Hinge2Joint(Physics.world)
self.joint.attach(Physics.Nodes[entity1].body, Physics.Nodes[entity2].body)
self.joint.setAnchor(anchor)
self.joint.setAxis1(axis1)
self.joint.setAxis2(axis2)
self.joint.setParam(ode.ParamLoStop, limitLo)
self.joint.setParam(ode.ParamHiStop, limitHi)
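# Illustrative usage (assumes `hull` and `turret` entities were already added
# to the World with physics enabled, so Physics.Nodes holds bodies for them):
#   FixedJoint(hull, turret)                  # weld the two bodies together
#   BallJoint(hull, turret, (0.0, 1.0, 0.0))  # ball-and-socket at world anchor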
|
|
import numpy as np
from scipy.special import factorial
from scipy._lib._util import _asarray_validated, float_factorial
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
"barycentric_interpolate", "approximate_taylor_polynomial"]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class _Interpolator1D(object):
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via _set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Input values `x` must be convertible to `float` values like `int`
or `float`.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = _asarray_validated(x, check_finite=False, as_inexact=True)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to an N-D array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.rollaxis(np.asarray(yi), self._y_axis)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
self._y_extra_shape[:-self._y_axis])
raise ValueError("Data must be of shape %s" % ok_shape)
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex_
else:
if not union or self.dtype != np.complex_:
self.dtype = np.float_
class _Interpolator1DWithDerivatives(_Interpolator1D):
def derivatives(self, x, der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : int or None, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
Array with derivatives; d[j] contains the jth derivative.
Shape of d[j] is determined by replacing the interpolation
axis in the original array with the shape of x.
Examples
--------
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der)
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ list(range(1, nx+1)) +
list(range(nx+1+self._y_axis, nx+ny+1)))
y = y.transpose(s)
return y
def derivative(self, x, der=1):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : integer, optional
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
Derivative interpolated at the x-points. Shape of d is
determined by replacing the interpolation axis in the
original array with the shape of x.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der+1)
return self._finish_y(y[der], x_shape)
class KroghInterpolator(_Interpolator1DWithDerivatives):
"""
Interpolating polynomial for a set of points.
The polynomial passes through all the pairs (xi,yi). One may
additionally specify a number of derivatives at each point xi;
this is done by repeating the value xi and specifying the
derivatives as successive yi values.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Parameters
----------
xi : array_like, length N
Known x-coordinates. Must be sorted in increasing order.
yi : array_like
Known y-coordinates. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
    >>> import numpy as np
    >>> xi = np.linspace(0, 1, 5)
>>> yi, ypi = np.random.rand(2, 5)
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
def __init__(self, xi, yi, axis=0):
_Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
c[0] = self.yi[0]
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
for k in range(1, self.n):
s = 0
while s <= k and xi[k-s] == xi[k]:
s += 1
s -= 1
Vk[0] = self.yi[k]/float_factorial(s)
for i in range(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s == 0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def _evaluate(self, x):
pi = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0,np.newaxis,:]
for k in range(1, self.n):
w = x - self.xi[k-1]
pi = w*pi
p += pi[:,np.newaxis] * self.c[k]
return p
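    # _evaluate() above evaluates the Newton form built in __init__:
    #   p(x) = c[0] + c[1](x - x[0]) + c[2](x - x[0])(x - x[1]) + ...
    # accumulating the product pi = (x - x[0])...(x - x[k-1]) incrementally.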
def _evaluate_derivatives(self, x, der=None):
n = self.n
r = self.r
if der is None:
der = self.n
pi = np.zeros((n, len(x)))
w = np.zeros((n, len(x)))
pi[0] = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0, np.newaxis, :]
for k in range(1, n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1] * pi[k-1]
p += pi[k, :, np.newaxis] * self.c[k]
cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
cn[0] = p
for k in range(1, n):
for i in range(1, n-k+1):
pi[i] = w[k+i-1]*pi[i-1] + pi[i]
cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
cn[k] *= float_factorial(k)
cn[n, :, :] = 0
return cn[:der]
def krogh_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for polynomial interpolation.
See `KroghInterpolator` for more details.
Parameters
----------
xi : array_like
Known x-coordinates.
yi : array_like
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
vectors of length R, or scalars if R=1.
x : array_like
Point or points at which to evaluate the derivatives.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
d : ndarray
If the interpolator's values are R-D then the
returned array will be the number of derivatives by N by R.
If `x` is a scalar, the middle dimension will be dropped; if
the `yi` are scalars then the last dimension will be dropped.
See Also
--------
KroghInterpolator : Krogh interpolator
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
Examples
--------
We can interpolate 2D observed data using krogh interpolation:
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import krogh_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = krogh_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="krogh interpolation")
>>> plt.legend()
>>> plt.show()
"""
P = KroghInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of `x` values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None, optional
The order of the polynomial to be used in the fitting; `f` will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
Examples
--------
We can calculate Taylor approximation polynomials of sin function with
various degrees:
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import approximate_taylor_polynomial
>>> x = np.linspace(-10.0, 10.0, num=100)
>>> plt.plot(x, np.sin(x), label="sin curve")
>>> for degree in np.arange(1, 15, step=2):
... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
... order=degree + 2)
... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
... borderaxespad=0.0, shadow=True)
>>> plt.tight_layout()
>>> plt.axis([-10, 10, -10, 10])
>>> plt.show()
"""
if order is None:
order = degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
    xs = scale*np.cos(np.linspace(0, np.pi, n, endpoint=n % 2)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(_Interpolator1D):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
The values yi need to be provided before the function is
evaluated, but none of the preprocessing depends on them, so rapid
updates are possible.
Parameters
----------
xi : array_like
1-D array of x coordinates of the points the polynomial
should pass through
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later via the `set_y` method.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None, axis=0):
_Interpolator1D.__init__(self, xi, yi, axis)
self.xi = np.asfarray(xi)
self.set_yi(yi)
self.n = len(self.xi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in range(1, self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
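    # The weights computed above are the classical barycentric weights,
    #   w_j = 1 / prod_{k != j} (x_j - x_k),
    # built incrementally (up to a common sign that cancels in the ratio).
    # Evaluation then uses the second barycentric formula,
    #   p(x) = sum_j(w_j y_j / (x - x_j)) / sum_j(w_j / (x - x_j)).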
def set_yi(self, yi, axis=None):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
"""
if yi is None:
self.yi = None
return
self._set_yi(yi, xi=self.xi, axis=axis)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like
The x coordinates of the points that the polynomial should pass
through.
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
vector-valued.
If `yi` is not given, the y values will be supplied later. `yi` should
be given if and only if the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = self._reshape_yi(yi, check=True)
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi **= -1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in range(old_n, self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by len(x), where N is the degree of the polynomial.
"""
return _Interpolator1D.__call__(self, x)
def _evaluate(self, x):
if x.size == 0:
p = np.zeros((0, self.r), dtype=self.dtype)
else:
c = x[...,np.newaxis]-self.xi
z = c == 0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
# Now fix where x==some xi
r = np.nonzero(z)
if len(r) == 1: # evaluation at a scalar
if len(r[0]) > 0: # equals one of the points
p = self.yi[r[0][0]]
else:
p[r[:-1]] = self.yi[r[-1]]
return p
def barycentric_interpolate(xi, yi, x, axis=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the `x` coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Parameters
----------
xi : array_like
1-D array of x coordinates of the points the polynomial should
pass through
yi : array_like
The y coordinates of the points the polynomial should pass through.
x : scalar or array_like
Points to evaluate the interpolator at.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
See Also
--------
    BarycentricInterpolator : Barycentric interpolator
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class `BarycentricInterpolator`.
This is what this function uses internally.
Examples
--------
We can interpolate 2D observed data using barycentric interpolation:
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import barycentric_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = barycentric_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="barycentric interpolation")
>>> plt.legend()
>>> plt.show()
"""
return BarycentricInterpolator(xi, yi, axis=axis)(x)
|
|
import sys
import operator
import pytest
import ctypes
import gc
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
        # In theory, they may hash the same (collision)?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
        # In theory, they may hash the same (collision)?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# indexing with a list produces subfields
# the align flag should be preserved
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
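# '<U20' occupies 80 bytes, so 'A' lands at offset 80 and 'B' at 88 in both
# the packed and aligned layouts; the expected offsets below hold either way.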
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
#
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (a PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
# Takes a different code path (fails earlier):
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
#Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def iter_struct_object_dtypes():
"""
Iterates over a few complicated dtypes and object patterns which
fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
"""These tests cover various uses of complicated structured types which
include objects and thus require reference counting.
"""
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
pytest.param(np.empty, None,
# None is probably used for too many things
marks=pytest.mark.skip("unreliable due to python's behaviour")),
(np.ones, 1),
(np.zeros, 0)])
def test_structured_object_create_delete(self, dt, pat, count, singleton,
creation_func, creation_obj):
"""Structured object reference counting in creation and deletion"""
# The test assumes that 0, 1, and None are singletons.
gc.collect()
before = sys.getrefcount(creation_obj)
arr = creation_func(3, dt)
now = sys.getrefcount(creation_obj)
assert now - before == count * 3
del arr
now = sys.getrefcount(creation_obj)
assert now == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_item_setting(self, dt, pat, count, singleton):
"""Structured object reference counting for simple item setting"""
one = 1
gc.collect()
before = sys.getrefcount(singleton)
arr = np.array([pat] * 3, dt)
assert sys.getrefcount(singleton) - before == count * 3
# Fill with `1` and check that it was replaced correctly:
before2 = sys.getrefcount(one)
arr[...] = one
after2 = sys.getrefcount(one)
assert after2 - before2 == count * 3
del arr
gc.collect()
assert sys.getrefcount(one) == before2
assert sys.getrefcount(singleton) == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(
['shape', 'index', 'items_changed'],
[((3,), ([0, 2],), 2),
((3, 2), ([0, 2], slice(None)), 4),
((3, 2), ([0, 2], [1]), 2),
((3,), ([True, False, True]), 2)])
def test_structured_object_indexing(self, shape, index, items_changed,
dt, pat, count, singleton):
"""Structured object reference counting for advanced indexing."""
zero = 0
one = 1
arr = np.zeros(shape, dt)
gc.collect()
before_zero = sys.getrefcount(zero)
before_one = sys.getrefcount(one)
# Test item getting:
part = arr[index]
after_zero = sys.getrefcount(zero)
assert after_zero - before_zero == count * items_changed
del part
# Test item setting:
arr[index] = one
gc.collect()
after_zero = sys.getrefcount(zero)
after_one = sys.getrefcount(one)
assert before_zero - after_zero == count * items_changed
assert after_one - before_one == count * items_changed
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
"""Structured object reference counting for specialized functions.
The older functions such as take and repeat use different code paths
than item setting (at the time of writing).
"""
indices = [0, 1]
arr = np.array([pat] * 3, dt)
gc.collect()
before = sys.getrefcount(singleton)
res = arr.take(indices)
after = sys.getrefcount(singleton)
assert after - before == count * 2
new = res.repeat(10)
gc.collect()
after_repeat = sys.getrefcount(singleton)
assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
"""
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
'offsets':[0, 4]}, (2, 3))])
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
'offsets':[4]}, (2, 3))])
@pytest.mark.xfail(reason="inaccessible data is changed see gh-12686.")
@pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[...] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
# Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
def test_list_recursion(self):
l = list()
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
def test_dict_recursion(self):
d = dict(names=['self'], formats=[None], offsets=[0])
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
assert_raises(TypeError, np.dtype, int, metadata='datum')
assert_raises(TypeError, np.dtype, int, metadata=1)
assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
assert_(d['a'].metadata == {'datum': 1})
def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['r','b'],"
" 'formats':['u1','u1'],"
" 'offsets':[0,2],"
" 'titles':['Red pixel','Blue pixel'],"
" 'itemsize':3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names':['r','b'], "
"'formats':['u1','u1'], "
"'offsets':[0,2], "
"'titles':['Red pixel','Blue pixel'], "
"'itemsize':4})")
def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
def test_repr_str_subarray(self):
dt = np.dtype(('<i2', (1,)))
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
def test_empty_string_to_object(self):
# Pull request #4722
np.array(["", ""]).astype(object)
def test_void_subclass_unsized(self):
dt = np.dtype(np.record)
assert_equal(repr(dt), "dtype('V')")
assert_equal(str(dt), '|V0')
assert_equal(dt.name, 'record')
def test_void_subclass_sized(self):
dt = np.dtype((np.record, 2))
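# dtype names report the width in bits, so a 2-byte record is 'record16'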
assert_equal(repr(dt), "dtype('V2')")
assert_equal(str(dt), '|V2')
assert_equal(dt.name, 'record16')
def test_void_subclass_fields(self):
dt = np.dtype((np.record, [('a', '<u2')]))
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["subdtype", "descr", "str", "name", "base", "shape",
"isbuiltin", "isnative", "isalignedstruct", "fields",
"metadata", "hasobject"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
def test_dtype_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["names"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.loads(pickle.dumps(dtype, proto))
assert_equal(pickled, dtype)
assert_equal(pickled.descr, dtype.descr)
if dtype.metadata is not None:
assert_equal(pickled.metadata, dtype.metadata)
# Check the reconstructed dtype is functional
x = np.zeros(3, dtype=dtype)
y = np.zeros(3, dtype=pickled)
assert_equal(x, y)
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
def test_structured(self):
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
self.check_pickling(dt)
def test_structured_aligned(self):
dt = np.dtype('i4, i1', align=True)
self.check_pickling(dt)
def test_structured_unaligned(self):
dt = np.dtype('i4, i1', align=False)
self.check_pickling(dt)
def test_structured_padded(self):
dt = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
self.check_pickling(dt)
def test_structured_titles(self):
dt = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
self.check_pickling(dt)
@pytest.mark.parametrize('base', ['m8', 'M8'])
@pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
'ms', 'us', 'ns', 'ps', 'fs', 'as'])
def test_datetime(self, base, unit):
dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
self.check_pickling(dt)
if unit:
dt = np.dtype('%s[7%s]' % (base, unit))
self.check_pickling(dt)
def test_metadata(self):
dt = np.dtype(int, metadata={'datum': 1})
self.check_pickling(dt)
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
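# 'a' is the bound astype method; converting rational 1111 to int8 must overflow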
assert_raises(OverflowError, a, 'int8')
# test that dtype detection finds user-defined types
x = rational(1)
assert_equal(np.array([x,x]).dtype, np.dtype(rational))
def test_dtypes_are_true():
# test for gh-6294
assert bool(np.dtype('f8'))
assert bool(np.dtype('i8'))
assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
def test_invalid_dtype_string():
# test for gh-10440
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
def test_keyword_argument():
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
class TestFromDTypeAttribute:
def test_simple(self):
class dt:
dtype = "f8"
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
def test_recursion(self):
class dt:
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt_instance)
def test_void_subtype(self):
class dt(np.void):
# This code path was previously fully untested, so it is unclear
# what it should be useful for. Note that if np.void is used
# numpy will think we are deallocating a base type [1.17, 2019-02].
dtype = np.dtype("f,f")
np.dtype(dt)
np.dtype(dt(1))
def test_void_subtype_recursion(self):
class dt(np.void):
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
with pytest.raises(RecursionError):
np.dtype(dt(1))
class TestDTypeClasses:
@pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
def test_basic_dtypes_subclass_properties(self, dtype):
# Note: Except for the isinstance and type checks, these attributes
# are considered currently private and may change.
dtype = np.dtype(dtype)
assert isinstance(dtype, np.dtype)
assert type(dtype) is not np.dtype
assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]"
assert type(dtype).__module__ == "numpy"
assert not type(dtype)._abstract
# the flexible dtypes and datetime/timedelta have additional parameters
# which are more than just storage information, these would need to be
# given when creating a dtype:
parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
if dtype.type not in parametric:
assert not type(dtype)._parametric
assert type(dtype)() is dtype
else:
assert type(dtype)._parametric
with assert_raises(TypeError):
type(dtype)()
def test_dtype_superclass(self):
assert type(np.dtype) is not type
assert isinstance(np.dtype, type)
assert type(np.dtype).__name__ == "_DTypeMeta"
assert type(np.dtype).__module__ == "numpy"
assert np.dtype._abstract
class TestFromCTypes:
@staticmethod
def check(ctype, dtype):
dtype = np.dtype(dtype)
assert_equal(np.dtype(ctype), dtype)
assert_equal(np.dtype(ctype()), dtype)
def test_array(self):
c8 = ctypes.c_uint8
self.check( 3 * c8, (np.uint8, (3,)))
self.check( 1 * c8, (np.uint8, (1,)))
self.check( 0 * c8, (np.uint8, (0,)))
self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
def test_padded_structure(self):
class PaddedStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
], align=True)
self.check(PaddedStruct, expected)
def test_bit_fields(self):
class BitfieldStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8, 7),
('b', ctypes.c_uint8, 1)
]
assert_raises(TypeError, np.dtype, BitfieldStruct)
assert_raises(TypeError, np.dtype, BitfieldStruct())
def test_pointer(self):
p_uint8 = ctypes.POINTER(ctypes.c_uint8)
assert_raises(TypeError, np.dtype, p_uint8)
def test_void_pointer(self):
self.check(ctypes.c_void_p, np.uintp)
def test_union(self):
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
]
expected = np.dtype(dict(
names=['a', 'b'],
formats=[np.uint8, np.uint16],
offsets=[0, 0],
itemsize=2
))
self.check(Union, expected)
def test_union_with_struct_packed(self):
class Struct(ctypes.Structure):
_pack_ = 1
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_union_packed(self):
class Struct(ctypes.Structure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
class Union(ctypes.Union):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
])
self.check(PackedStructure, expected)
def test_large_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 2
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint8),
('d', ctypes.c_uint16),
('e', ctypes.c_uint32),
('f', ctypes.c_uint32),
('g', ctypes.c_uint8)
]
expected = np.dtype(dict(
formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8],
offsets=[0, 2, 4, 6, 8, 12, 16],
names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
itemsize=18))
self.check(PackedStructure, expected)
def test_big_endian_structure_packed(self):
class BigEndStruct(ctypes.BigEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '>u4')])
self.check(BigEndStruct, expected)
def test_little_endian_structure_packed(self):
class LittleEndStruct(ctypes.LittleEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '<u4')])
self.check(LittleEndStruct, expected)
def test_little_endian_structure(self):
class PaddedStruct(ctypes.LittleEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '<B'),
('b', '<H')
], align=True)
self.check(PaddedStruct, expected)
def test_big_endian_structure(self):
class PaddedStruct(ctypes.BigEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '>B'),
('b', '>H')
], align=True)
self.check(PaddedStruct, expected)
def test_simple_endian_types(self):
self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
all_types = set(np.typecodes['All'])
all_pairs = permutations(all_types, 2)
@pytest.mark.parametrize("pair", all_pairs)
def test_pairs(self, pair):
"""
Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
"""
# gh-5645: check that np.dtype('i,L') can be used
pair_type = np.dtype('{},{}'.format(*pair))
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
assert_equal(pair_type, expected)
class TestUserDType:
@pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
def test_custom_structured_dtype(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
dt = create_custom_field_dtype(blueprint, mytype, 0)
assert dt.type == mytype
# We cannot (currently) *create* this dtype with `np.dtype` because
# mytype does not inherit from `np.generic`. This seems like an
# unnecessary restriction, but one that has been around forever:
assert np.dtype(mytype) == np.dtype("O")
def test_custom_structured_dtype_errors(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
with pytest.raises(ValueError):
# Tests what happens if fields are unset during creation
# which is currently rejected due to the containing object
# (see PyArray_RegisterDataType).
create_custom_field_dtype(blueprint, mytype, 1)
with pytest.raises(RuntimeError):
# Tests that a dtype must have its type field set to np.dtype,
# or in this case a builtin instance.
create_custom_field_dtype(blueprint, mytype, 2)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import attachments as attachment_views
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
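# The REST filter is exposed as 'instance_id', but the underlying DB
# field is 'instance_uuid', so translate it before querying.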
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(202)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (i.e. Nova boot-from-volume), the
create can be called with just the volume UUID and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance; the server_id parameter
referenced below is the UUID of the instance. For non-Nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE: all that is required for a reserve is a volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment'].get('instance_uuid', None)
if not instance_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'instance_uuid' "
"to create attachment."))
volume_uuid = body['attachment'].get('volume_uuid', None)
if not volume_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'volume_uuid' "
"to create attachment."))
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment'].get('connector', None)
if not connector:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector' "
"to update attachment."))
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment (%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more, do we want to return and object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/deletes the specified attachment and returns a list of any
known shared attachment IDs for the affected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(202)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
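# Completing the attachment flips both objects: the attachment becomes
# 'attached' and the volume transitions to 'in-use'.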
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
import mox
from nose.plugins.attrib import attr
from oslo.config import cfg
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.engine import parser
from heat.engine.resources import user
from heat.tests import fakes
@attr(tag=['unit', 'resource', 'User'])
@attr(speed='fast')
class UserTest(unittest.TestCase):
def setUp(self):
self.m = mox.Mox()
self.fc = fakes.FakeKeystoneClient(username='test_stack.CfnUser')
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def tearDown(self):
self.m.UnsetStubs()
print "UserTest teardown complete"
def load_template(self, template_name='Rails_Single_Instance.template'):
self.path = os.path.dirname(os.path.realpath(__file__)).\
replace('heat/tests', 'templates')
f = open("%s/%s" % (self.path, template_name))
t = template_format.parse(f.read())
f.close()
return t
def parse_stack(self, t):
ctx = context.RequestContext.from_dict({
'tenant_id': 'test_tenant',
'username': 'test_username',
'password': 'password',
'auth_url': 'http://localhost:5000/v2.0'})
template = parser.Template(t)
params = parser.Parameters('test_stack',
template,
{'KeyName': 'test',
'DBRootPassword': 'test',
'DBUsername': 'test',
'DBPassword': 'test'})
stack = parser.Stack(ctx, 'test_stack', template, params)
return stack
def create_user(self, t, stack, resource_name):
resource = user.User(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(None, resource.validate())
self.assertEqual(None, resource.create())
self.assertEqual(user.User.CREATE_COMPLETE, resource.state)
return resource
def test_user(self):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
t = self.load_template()
stack = self.parse_stack(t)
resource = self.create_user(t, stack, 'CfnUser')
self.assertEqual(self.fc.user_id, resource.resource_id)
self.assertEqual('test_stack.CfnUser', resource.FnGetRefId())
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertEqual(user.User.UPDATE_REPLACE,
resource.handle_update({}))
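# With resource_id cleared the delete should be a no-op that still completes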
resource.resource_id = None
self.assertEqual(None, resource.delete())
self.assertEqual('DELETE_COMPLETE', resource.state)
resource.resource_id = self.fc.access
resource.state_set('CREATE_COMPLETE')
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertEqual(None, resource.delete())
self.assertEqual('DELETE_COMPLETE', resource.state)
resource.state_set('CREATE_COMPLETE')
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertEqual(None, resource.delete())
self.assertEqual('DELETE_COMPLETE', resource.state)
self.m.VerifyAll()
def test_user_validate_policies(self):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
tmpl = 'WordPress_Single_Instance_With_HA_AccessPolicy.template'
t = self.load_template(template_name=tmpl)
stack = self.parse_stack(t)
resource = self.create_user(t, stack, 'CfnUser')
self.assertEqual(self.fc.user_id, resource.resource_id)
self.assertEqual('test_stack.CfnUser', resource.FnGetRefId())
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertEqual([u'WebServerAccessPolicy'],
resource.properties['Policies'])
# OK
self.assertTrue(
resource._validate_policies([u'WebServerAccessPolicy']))
# Resource name doesn't exist in the stack
self.assertFalse(resource._validate_policies([u'NoExistAccessPolicy']))
# Resource name is wrong Resource type
self.assertFalse(resource._validate_policies([u'NoExistAccessPolicy',
u'WikiDatabase']))
# Wrong type (AWS embedded policy format, not yet supported)
dict_policy = {"PolicyName": "AccessForCFNInit",
"PolicyDocument":
{"Statement": [{"Effect": "Allow",
"Action":
"cloudformation:DescribeStackResource",
"Resource": "*"}]}}
# However we should just ignore it to avoid breaking existing templates
self.assertTrue(resource._validate_policies([dict_policy]))
self.m.VerifyAll()
def test_user_create_bad_policies(self):
self.m.ReplayAll()
tmpl = 'WordPress_Single_Instance_With_HA_AccessPolicy.template'
t = self.load_template(template_name=tmpl)
t['Resources']['CfnUser']['Properties']['Policies'] = ['NoExistBad']
stack = self.parse_stack(t)
resource_name = 'CfnUser'
resource = user.User(resource_name,
t['Resources'][resource_name],
stack)
self.assertRaises(exception.InvalidTemplateAttribute,
resource.handle_create)
self.m.VerifyAll()
def test_user_access_allowed(self):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(user.AccessPolicy, 'access_allowed')
user.AccessPolicy.access_allowed('a_resource').AndReturn(True)
user.AccessPolicy.access_allowed('b_resource').AndReturn(False)
self.m.ReplayAll()
tmpl = 'WordPress_Single_Instance_With_HA_AccessPolicy.template'
t = self.load_template(template_name=tmpl)
stack = self.parse_stack(t)
resource = self.create_user(t, stack, 'CfnUser')
self.assertEqual(self.fc.user_id, resource.resource_id)
self.assertEqual('test_stack.CfnUser', resource.FnGetRefId())
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertTrue(resource.access_allowed('a_resource'))
self.assertFalse(resource.access_allowed('b_resource'))
self.m.VerifyAll()
def test_user_access_allowed_ignorepolicy(self):
self.m.StubOutWithMock(user.User, 'keystone')
user.User.keystone().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(user.AccessPolicy, 'access_allowed')
user.AccessPolicy.access_allowed('a_resource').AndReturn(True)
user.AccessPolicy.access_allowed('b_resource').AndReturn(False)
self.m.ReplayAll()
tmpl = 'WordPress_Single_Instance_With_HA_AccessPolicy.template'
t = self.load_template(template_name=tmpl)
t['Resources']['CfnUser']['Properties']['Policies'] = [
'WebServerAccessPolicy', {'an_ignored': 'policy'}]
stack = self.parse_stack(t)
resource = self.create_user(t, stack, 'CfnUser')
self.assertEqual(self.fc.user_id, resource.resource_id)
self.assertEqual('test_stack.CfnUser', resource.FnGetRefId())
self.assertEqual('CREATE_COMPLETE', resource.state)
self.assertTrue(resource.access_allowed('a_resource'))
self.assertFalse(resource.access_allowed('b_resource'))
self.m.VerifyAll()
@attr(tag=['unit', 'resource', 'AccessKey'])
@attr(speed='fast')
class AccessKeyTest(unittest.TestCase):
def setUp(self):
self.m = mox.Mox()
self.fc = fakes.FakeKeystoneClient(username='test_stack.CfnUser')
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def tearDown(self):
self.m.UnsetStubs()
print "AccessKey teardown complete"
def load_template(self):
self.path = os.path.dirname(os.path.realpath(__file__)).\
replace('heat/tests', 'templates')
f = open("%s/Rails_Single_Instance.template" % self.path)
t = template_format.parse(f.read())
f.close()
return t
def parse_stack(self, t):
ctx = context.RequestContext.from_dict({
'tenant_id': 'test_tenant',
'username': 'test_username',
'password': 'password',
'auth_url': 'http://localhost:5000/v2.0'})
template = parser.Template(t)
params = parser.Parameters('test_stack',
template,
{'KeyName': 'test',
'DBRootPassword': 'test',
'DBUsername': 'test',
'DBPassword': 'test'})
stack = parser.Stack(ctx, 'test_stack', template, params)
return stack
def create_access_key(self, t, stack, resource_name):
resource = user.AccessKey(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(None, resource.validate())
self.assertEqual(None, resource.create())
self.assertEqual(user.AccessKey.CREATE_COMPLETE,
resource.state)
return resource
def test_access_key(self):
self.m.StubOutWithMock(user.AccessKey, 'keystone')
user.AccessKey.keystone().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
t = self.load_template()
# Override the Ref for UserName with a hard-coded name,
# so we don't need to create the User resource
t['Resources']['HostKeys']['Properties']['UserName'] =\
'test_stack.CfnUser'
stack = self.parse_stack(t)
stack.resources['CfnUser'].resource_id = self.fc.user_id
stack.resources['CfnUser'].state = 'CREATE_COMPLETE'
resource = self.create_access_key(t, stack, 'HostKeys')
self.assertEqual(user.AccessKey.UPDATE_REPLACE,
resource.handle_update({}))
self.assertEqual(self.fc.access,
resource.resource_id)
self.assertEqual(self.fc.secret,
resource._secret)
self.assertEqual(resource.FnGetAtt('UserName'), 'test_stack.CfnUser')
resource._secret = None
self.assertEqual(resource.FnGetAtt('SecretAccessKey'),
self.fc.secret)
self.assertRaises(exception.InvalidTemplateAttribute,
resource.FnGetAtt, 'Foo')
self.assertEqual(None, resource.delete())
self.m.VerifyAll()
def test_access_key_no_user(self):
self.m.ReplayAll()
t = self.load_template()
# Set the resource properties UserName to an unknown user
t['Resources']['HostKeys']['Properties']['UserName'] =\
'test_stack.NoExist'
stack = self.parse_stack(t)
stack.resources['CfnUser'].resource_id = self.fc.user_id
resource = user.AccessKey('HostKeys',
t['Resources']['HostKeys'],
stack)
self.assertEqual('could not find user test_stack.NoExist',
resource.create())
self.assertEqual(user.AccessKey.CREATE_FAILED,
resource.state)
self.m.VerifyAll()
@attr(tag=['unit', 'resource', 'AccessPolicy'])
@attr(speed='fast')
class AccessPolicyTest(unittest.TestCase):
def setUp(self):
self.m = mox.Mox()
self.fc = fakes.FakeKeystoneClient(username='test_stack.CfnUser')
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def tearDown(self):
self.m.UnsetStubs()
print "UserTest teardown complete"
def load_template(self):
template_name =\
'WordPress_Single_Instance_With_HA_AccessPolicy.template'
self.path = os.path.dirname(os.path.realpath(__file__)).\
replace('heat/tests', 'templates')
f = open("%s/%s" % (self.path, template_name))
t = template_format.parse(f.read())
f.close()
return t
def parse_stack(self, t):
ctx = context.RequestContext.from_dict({
'tenant_id': 'test_tenant',
'username': 'test_username',
'password': 'password',
'auth_url': 'http://localhost:5000/v2.0'})
template = parser.Template(t)
params = parser.Parameters('test_stack',
template,
{'KeyName': 'test',
'DBRootPassword': 'test',
'DBUsername': 'test',
'DBPassword': 'test'})
stack = parser.Stack(ctx, 'test_stack', template, params)
return stack
def test_accesspolicy_create_ok(self):
t = self.load_template()
stack = self.parse_stack(t)
resource_name = 'WebServerAccessPolicy'
resource = user.AccessPolicy(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(None, resource.create())
self.assertEqual(user.User.CREATE_COMPLETE, resource.state)
def test_accesspolicy_create_ok_empty(self):
t = self.load_template()
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = []
stack = self.parse_stack(t)
resource = user.AccessPolicy(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(None, resource.create())
self.assertEqual(user.User.CREATE_COMPLETE, resource.state)
def test_accesspolicy_create_err_notfound(self):
t = self.load_template()
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = [
'NoExistResource']
stack = self.parse_stack(t)
resource = user.AccessPolicy(resource_name,
t['Resources'][resource_name],
stack)
self.assertRaises(exception.ResourceNotFound, resource.handle_create)
def test_accesspolicy_update(self):
t = self.load_template()
resource_name = 'WebServerAccessPolicy'
stack = self.parse_stack(t)
resource = user.AccessPolicy(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(user.AccessPolicy.UPDATE_REPLACE,
resource.handle_update({}))
def test_accesspolicy_access_allowed(self):
t = self.load_template()
resource_name = 'WebServerAccessPolicy'
stack = self.parse_stack(t)
resource = user.AccessPolicy(resource_name,
t['Resources'][resource_name],
stack)
self.assertTrue(resource.access_allowed('WikiDatabase'))
self.assertFalse(resource.access_allowed('NotWikiDatabase'))
self.assertFalse(resource.access_allowed(None))
|
|
#!/usr/bin/env python
import optparse, os, csv, glob, sys
import MySQLdb
import PEATSA.Core as Core
import PEATSA.Core.Matrix
import matplotlib.pyplot as plt
import numpy as np
class ProteinComplexTool:
def __init__(self):
pass
def DeltaStability(self,inputFile, mutationList, configurationFile, workingDirectory, outputDirectory):
'''Calculates the stability difference between a protein and a set of mutants
Parameters:
inputFile: A PDB file of the protein
mutationList: A list of Data.MutationSet instances. Each represents a mutant of the protein.
configurationFile: The location of a proteinDesignTool.conf file - defaults to home directory.
workingDirectory: Where the calculation will be run.
outputDirectory: Where the results will be written.
Returns
A Data.DataSet instance containing one matrix, stabilityResults.
Each row of this matrix corresponds to a mutant defined in the mutationList argument.'''
#Create the ProteinDesignTool instance
tool = Core.ProteinDesignTool.ProteinDesignTool(configurationFile,
workingDirectory=workingDirectory,
pdbFile=inputFile,
outputDirectory=outputDirectory,
removeHeterogens=True)
#The above cleans the pdb file and copies it to the working directory.
#Use this pdb from now on.
inputFile = tool.pdbFile
#Create the mutants
mutantCollection = Core.Data.MutantCollection(pdbFile=inputFile,mutationList=mutationList,location=outputDirectory,temporary=True)
#Run stability calculation
#The results are added to the ProteinDesignTool instance's dataDirectory attribute
#This is an instance of Data.DataSet class
tool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles())
#Clean up - Deletes files copied to the working directory for Uffbaps
tool.cleanUp()
return tool.dataDirectory
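# A minimal usage sketch for DeltaStability (hypothetical file names; assumes
# PEATSA is installed and a valid proteinDesignTool.conf is available):
#
#   tool = ProteinComplexTool()
#   mutList = tool.createMutlist('complex.pdb')
#   data = tool.DeltaStability(inputFile='complex.pdb',
#                              mutationList=mutList,
#                              configurationFile='proteinDesignTool.conf',
#                              workingDirectory=os.getcwd(),
#                              outputDirectory=os.getcwd())
#   # data.stabilityResults then holds one row per mutant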
def remALT(self,pdbfile):
import Protool
x = Protool.structureIO()
x.readpdb('%s.pdb' % (pdbfile))
x.RemoveALT()
x.writepdb('%s.pdb' % (pdbfile), dont_write_HETATMS=1)
print 'Removed alternate residues'
def splitter(self,pdbDir,pdb,reactions_list,cur,db):
import string
if reactions_list == ['']:
# query the database
cur.execute("SELECT DISTINCT Chain_ID from pdb where PDB_ID = '%s';" % (pdb))
a = cur.fetchall() # fetch results
print 'a', a
expr=[]
chains = [i[0] for i in a]
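# Build one MDAnalysis selection per chain, e.g. ['segid A'], so each
# chain can be written out as its own PDB file below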
for i in chains:
s=["segid "+i]
expr.append(s)
b = str(a) # convert from tuple to string
exclude = set(string.punctuation) # set of punctuation characters
b = ''.join(ch for ch in b if ch not in exclude) # remove punctuation from b
e = ''.join(b.split(' '))
self.do_split(pdbDir, pdb, expr, e)
return e
else:
expr1=[]
for c in reactions_list:
if len(c)>1:
s = ["segid "+i for i in c]
expr1.append(s)
else:
expr1.append(["segid "+c])
self.do_split(pdbDir,pdb, expr1, reactions_list)
return reactions_list
def do_split(self,pdbDir,pdb, expr, e):
import MDAnalysis
u = MDAnalysis.Universe(pdbDir, permissive=False)
for i in range(len(expr)):
print expr[i]
Z = u.selectAtoms(*expr[i])
Z.write('%s_%s.pdb' % (pdb,e[i]))
print 'Extracted chain(s)', e[i],'from', pdb
def createMutlist(self,pdb):
mutList = Core.Data.CreateScanList(pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
return mutList
def displayResults(self,pdb,split_list,comp_list,cur,db):
width=0.5
cur.execute("SELECT * FROM results_%s;" % (split_list[0]))
complexResults = cur.fetchall()
mutations = [i[0] for i in complexResults] # Mutation list
complexScores = [i[1] for i in complexResults] # dG scores of pdb complex
count = len(mutations) # Number of calcs
ind = np.arange(count)
if len(split_list)>1: # For binding calcs, no matter in what order chains were split
chainResults = []
for i in split_list[1:]:
cur.execute("select * from results_%s;" % (i))
chainResults.append(cur.fetchall())
chainScores = [i[1] for y in chainResults for i in y] # dG scores of chains split from pdb
ddG = []
cur.execute("create table if not exists ddG_%s_%s(mutation VARCHAR(10), ddG FLOAT);" % (pdb, comp_list))
for i in range(len(complexScores)):
ddG.append(complexScores[i] - chainScores[i])
for i in range(len(mutations)):
print "ddG", mutations[i], ddG[i]
cur.execute("insert into ddG_%s_%s (mutation, ddG) VALUES (%s%s%s, %s%s%s);" % (pdb,comp_list, '"', mutations[i], '"', '"',ddG[i],'"'))
plt.plot(ind+(width/2), ddG, 'o-')
plt.axhline(linewidth=2, color='r')
plt.title("ddG Binding calculations for ALA scan of %s" % (split_list[0]))
else:
for i in range(len(mutations)):
print mutations[i], complexScores[i]
plt.bar(ind,complexScores,width,color='r')
plt.title("dG Stability calculations for ALA scan of %s" % (split_list[0]))
plt.xticks(ind+(width/2), mutations, rotation=90, fontsize=8)
plt.show()
sys.exit()
def main():
# Run program
# Connect to local database containing info about BMP pdbs
db = MySQLdb.connect(host="localhost", user = "root", passwd = "samsung", db = "sat")
cur = db.cursor()
cur.execute("SELECT VERSION()")
ver = cur.fetchone()
print "MySQLdb connection successful"
print "MySQL server version:", ver[0]
# Show pdbs in database
cur.execute("SELECT distinct PDB_ID from pdb;")
print "PDBs in database:"
a = cur.fetchall()
    pdb_ids = [i[0] for i in a]
    b = ','.join(pdb_ids)
print b
# Option to select pdb, config file, working dir etc..
parser = optparse.OptionParser()
# PDB option
parser.add_option("-p", "--pdb", help="Choose all or a pdb id", dest="pdb", default ="all")
# Mutation List or ALA scan option
parser.add_option("-m", "--mutationList", help="Location of mutation list file", dest="mutList", default="ALA")
# Configuration File
parser.add_option("-c", "--configurationFile", help="Location of configuration file", dest="configFile", default="/home/satnam/proteinDesignTool.conf")
# Output Directory
parser.add_option("-o", "--outputDirectory", help="Location of output directory", dest="outputDir", default=os.getcwd())
# Working Directory
parser.add_option("-w", "--workingDirectory", help="Location of working directory", dest="workingDir", default=os.getcwd())
# Choose option for user-defined calculations
parser.add_option("-u", "--userCalcs", help="Choose True or False if you would like to specifiy the calculations, otherwise each chain will be split", dest="userCalcOpt", default=False)
# Show Results Option
parser.add_option("-s", "--showResults", help="Shows previous results? True or False. If they don't exist, they will be calculated.", dest="showResults", default=True)
# Delete results from database
parser.add_option("-d", "--deleteResults", help="Deletes all results for the specified pdb from the database. Default False.", dest="deleteResults", default=False)
(opts, args) = parser.parse_args()
# Instantiate the class
run = ProteinComplexTool()
# pdb name/file handling
pdb = opts.pdb
pdbFile = ''.join((pdb,'.pdb'))
pdbDir = os.path.join(opts.outputDir,pdbFile)
print pdbDir
# Checking if user selected PDB is in the database
    if opts.pdb is not None:
        if opts.pdb not in pdb_ids:
            sys.exit('PDB not in Database, choose one from list')
        else:
            print 'PDB in Database'
print 'Checking what calculations can be performed'
# Check what calcs can be done with user defined PDB
cur.execute("SELECT distinct Entity_ID, Chain_ID, Chain_name, type from pdb where PDB_ID = %s%s%s;" % ('"',pdb,'"'))
entity = [] # entities in the pdbfile
chains = []
for i in cur.fetchall():
print "Entity:",i[0], "Chain Name:",i[2], "Type:",i[3] , "Chain ID:", i[1]
entity.append(i[0])
chains.append(i[1])
entity.sort()
# Delete results
    if opts.deleteResults == 'True':
        cur.execute("SHOW tables like 'results_%s%s';" % (pdb, '%'))
        drop_tables = cur.fetchall()
        print drop_tables
        for i in drop_tables:
            # fetchall() returns 1-tuples; use backticks, since MySQL
            # identifiers cannot be quoted with single quotes
            cur.execute("DROP TABLE `%s`;" % (i[0]))
            print "Results for", i[0], "deleted"
# Remove Alternate Residues from pdb, will overwrite the file
run.remALT(pdb)
# User defined splitting of chains from PDB, can be left
# blank and the PDB will be split to individual chains
    reactions_list = ['']
    reactants = ''
    products = ''
    if opts.userCalcOpt != 'False':
        reactants = raw_input("What reactants are consumed (enter chain IDs in the form AB+C+D):")
        products = raw_input("What products are produced (enter chain IDs in the form ABC+D):")
# If user leaves input blank, then the default is to calculate
# every chain individually vs complex
    if reactants == '':
        reactants = '+'.join(chains)
    print reactants
    if products == '':
        products = ''.join(chains)
reactants_list = reactants.split('+')
products_list = products.split('+')
# Split the pdb into chains, returns chains that have been split (A,B etc)
split_reactants = run.splitter(pdbDir,pdb,reactants_list,cur,db)
split_products = run.splitter(pdbDir,pdb,products_list,cur,db)
comp_list = split_products + split_reactants
comp_list = '_'.join(comp_list)
split_list = []
split_list_products = []
split_list_reactants = []
for i in split_reactants:
s = pdb+'_'+i
split_list_reactants.append(s)
for i in split_products:
s = pdb+'_'+i
split_list_products.append(s)
splitlist = split_list_products + split_list_reactants
for i in splitlist:
if i not in split_list:
split_list.append(i)
# Split_list is a list of the pdb and the individual pdbs
# that have been split
print split_list
# Show results
if opts.showResults == 'True':
count = 0
cur.execute("show tables;")
tables = cur.fetchall()
resTable = "".join(("results_",pdb))
for i in tables:
for y in i:
if y.startswith(resTable):
                    count += 1
        if count != 0:
            run.displayResults(pdb,split_list,comp_list,cur,db)
comp_list = comp_list +'_'+os.path.split(opts.mutList)[1]
# Run the calculations
# Load and check mutant list given by user, else do ALA scan
"""
if opts.mutList != "ALA":
mfile = Core.Data.MutationListFile(filename=opts.mutList,create=True)
mfile.removeDuplicates(autoUpdate=False)
mutList = mfile.mutantList()
else:
for i in split_list:
w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
"""
for i in split_list:
w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
results = run.DeltaStability(inputFile=w_pdb,
mutationList=mutList,
configurationFile=opts.configFile,
workingDirectory=opts.workingDir,
outputDirectory=opts.outputDir)
#Commit to database
cur.execute("create table if not exists results_%s(mutation VARCHAR(10), score FLOAT);" % (i))
for mutant in range(results.stabilityResults.numberOfRows()):
cur.execute("insert into results_%s (mutation, score) VALUES (%s%s%s, %s%s%s);" % (i, '"', results.stabilityResults[mutant][0], '"', '"', results.stabilityResults[mutant][-1],'"'))
print "Calculated ", i, "stability and results added to database"
# Display results
run.displayResults(pdb,split_list,comp_list,cur,db)
if __name__=='__main__':
main()
|
|
import mock
import unittest
from .helper import _ResourceMixin
class RecipientTest(_ResourceMixin, unittest.TestCase):
def _getTargetClass(self):
from .. import Recipient
return Recipient
def _getCollectionClass(self):
from .. import Collection
return Collection
def _getLazyCollectionClass(self):
from .. import LazyCollection
return LazyCollection
def _getBankAccountClass(self):
from .. import BankAccount
return BankAccount
def _makeOne(self):
return self._getTargetClass().from_data({
'object': 'recipient',
'id': 'recp_test',
'livemode': False,
'location': '/recipients/recp_test',
'verified': False,
'active': False,
'name': 'James Smith',
'email': 'secondary@recipient.co',
'description': 'Secondary recipient',
'type': 'individual',
'tax_id': '1234567890',
'bank_account': {
'object': 'bank_account',
'brand': 'test',
'last_digits': '2345',
'name': 'James Smith',
'created': '2015-06-02T05:41:53Z'
},
'failure_code': None,
'created': "2015-06-02T05:41:53Z"
})
@mock.patch('requests.post')
def test_create(self, api_call):
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "James Smith",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}""")
recipient = class_.create(
name='James Smith',
email='secondary@recipient.co',
description='Secondary recipient',
type='individual',
bank_account={
'brand': 'test',
'name': 'James Smith',
'number': '012345'
}
)
self.assertTrue(isinstance(recipient, class_))
self.assertEqual(recipient.id, 'recp_test')
self.assertEqual(recipient.name, 'James Smith')
self.assertEqual(recipient.description, 'Secondary recipient')
self.assertEqual(recipient.type, 'individual')
bank_account = recipient.bank_account
bank_account_class_ = self._getBankAccountClass()
self.assertTrue(isinstance(bank_account, bank_account_class_))
self.assertEqual(bank_account.brand, 'test')
self.assertEqual(bank_account.last_digits, '2345')
self.assertEqual(bank_account.name, 'James Smith')
self.assertRequest(
api_call,
'https://api.omise.co/recipients',
{
'name': 'James Smith',
'email': 'secondary@recipient.co',
'description': 'Secondary recipient',
'type': 'individual',
'bank_account': {
'brand': 'test',
'name': 'James Smith',
'number': '012345'
}
}
)
@mock.patch('requests.get')
def test_retrieve(self, api_call):
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "James Smith",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}""")
recipient = class_.retrieve('recp_test')
self.assertTrue(isinstance(recipient, class_))
self.assertFalse(recipient.verified)
self.assertFalse(recipient.active)
self.assertEqual(recipient.id, 'recp_test')
self.assertEqual(recipient.name, 'James Smith')
self.assertEqual(recipient.description, 'Secondary recipient')
self.assertEqual(recipient.tax_id, '1234567890')
self.assertEqual(recipient.type, 'individual')
bank_account_class_ = self._getBankAccountClass()
bank_account = recipient.bank_account
self.assertTrue(isinstance(bank_account, bank_account_class_))
self.assertEqual(bank_account.brand, 'test')
self.assertEqual(bank_account.last_digits, '2345')
self.assertEqual(bank_account.name, 'James Smith')
self.assertRequest(
api_call,
'https://api.omise.co/recipients/recp_test')
self.mockResponse(api_call, """{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "Foobar Baz",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}""")
recipient.reload()
self.assertEqual(recipient.name, 'Foobar Baz')
@mock.patch('requests.get')
def test_retrieve_no_args(self, api_call):
class_ = self._getTargetClass()
collection_class_ = self._getCollectionClass()
self.mockResponse(api_call, """{
"object": "list",
"from": "1970-01-01T07:00:00+07:00",
"to": "2015-06-02T05:41:53+07:00",
"offset": 0,
"limit": 20,
"total": 1,
"data": [
{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "Foobar Baz",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}
]
}""")
recipients = class_.retrieve()
self.assertTrue(isinstance(recipients, collection_class_))
self.assertTrue(isinstance(recipients[0], class_))
        self.assertEqual(recipients[0].id, 'recp_test')
        self.assertEqual(recipients[0].name, 'Foobar Baz')
self.assertRequest(api_call, 'https://api.omise.co/recipients')
@mock.patch('requests.get')
def test_list(self, api_call):
class_ = self._getTargetClass()
lazy_collection_class_ = self._getLazyCollectionClass()
self.mockResponse(api_call, """{
"object": "list",
"from": "1970-01-01T07:00:00+07:00",
"to": "2015-06-02T05:41:53+07:00",
"offset": 0,
"limit": 20,
"total": 1,
"data": [
{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "Foobar Baz",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}
]
}""")
recipients = class_.list()
self.assertTrue(isinstance(recipients, lazy_collection_class_))
recipients = list(recipients)
self.assertTrue(isinstance(recipients[0], class_))
        self.assertEqual(recipients[0].id, 'recp_test')
        self.assertEqual(recipients[0].name, 'Foobar Baz')
@mock.patch('requests.patch')
def test_update(self, api_call):
recipient = self._makeOne()
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"location": "/recipients/recp_test",
"verified": false,
"active": false,
"name": "Foobar Baz",
"email": "secondary@recipient.co",
"description": "Secondary recipient",
"type": "individual",
"tax_id": "1234567890",
"bank_account": {
"object": "bank_account",
"brand": "test",
"last_digits": "2345",
"name": "James Smith",
"created": "2015-06-02T05:41:53Z"
},
"failure_code": null,
"created": "2015-06-02T05:41:53Z"
}""")
self.assertTrue(isinstance(recipient, class_))
self.assertEqual(recipient.name, 'James Smith')
recipient.name = 'Foobar Baz'
recipient.update()
self.assertEqual(recipient.name, 'Foobar Baz')
self.assertRequest(
api_call,
'https://api.omise.co/recipients/recp_test',
{'name': 'Foobar Baz'}
)
@mock.patch('requests.delete')
def test_destroy(self, api_call):
recipient = self._makeOne()
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "recipient",
"id": "recp_test",
"livemode": false,
"deleted": true
}""")
self.assertTrue(isinstance(recipient, class_))
self.assertEqual(recipient.id, 'recp_test')
recipient.destroy()
self.assertTrue(recipient.destroyed)
self.assertRequest(
api_call,
'https://api.omise.co/recipients/recp_test'
)
|
|
#!/usr/bin/python
import analysis
import os
import sys
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
def eval_dict( s ):
ret = None
try:
exec("ret = " + s)
except:
return None
return ret
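# eval_dict() turns a repr()'d experiment record back into a dict, e.g.
# eval_dict("{'end_recv': [3.2], 'start_recv': [1.0], 'exception': ''}")
# returns that dict, and returns None if the string does not parse.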
def cdf_compare( dists, title, xl, xr, yl, yr, labels=None ):
    mm = min(dists[0])
    ma = max(dists[0])
    cnt = len(dists[0])
    for i in xrange(1,len(dists)):
        mm = min( mm, min(dists[i]) )
        ma = max( ma, max(dists[i]) )
        cnt = min( cnt, len(dists[i]) )
    print "cnt = " + str(cnt)
    # evaluate every ECDF on a common grid so the curves are comparable
    x = np.linspace( mm, ma, cnt )
    for i, dist in enumerate(dists):
        ecdf = sm.distributions.ECDF( dist )
        plt.step( x, ecdf(x), label=labels[i] if labels else None )
        dist.sort()
    #print dist
    plt.title( title )
    plt.xticks( xr )
    plt.yticks( yr )
    plt.xlabel( xl )
    plt.ylabel( yl )
    if labels:
        plt.legend( labels, loc=4 )
plt.show()
if __name__ == "__main__":
syndicate_data_1k = {}
syndicate_data_1M = {}
syndicate_data_50M = {}
s3_data_20k = {}
s3_data_50M = {}
s3_data_100blk = {}
s3_data_100blk_nocache = {}
plc_data_100blk = {}
syndicate_data_100blk = {}
intersection = []
for expfile in os.listdir( sys.argv[1] ):
expfd = open( os.path.join( sys.argv[1], expfile ), "r" )
expdata = analysis.parse_experiments( expfd )
expfd.close()
if len(expdata['fcdistro']) > 0 and "12" not in expdata['fcdistro']:
print >> sys.stderr, "%s: wrong distro '%s'" % (expfile, expdata['fcdistro'])
continue
syndicate_exp_1k = analysis.read_experiment_data( expdata, "Nr1w-x5-small-syndicate.py" )
syndicate_exp_1M = analysis.read_experiment_data( expdata, "Nr1w-x5-1M-syndicate.py" )
syndicate_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M-syndicate-4.py" )
syndicate_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-syndicate-3.py" )
s3_exp_20k = analysis.read_experiment_data( expdata, "Nr1w-x5.py" )
s3_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-cache-chunked.py" )
plc_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-planetlab-cache-chunked.py" )
s3_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M.py" )
s3_exp_100blk_nocache = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-chunked.py" )
intersect = True
"""
if syndicate_exp_1k != None and len(syndicate_exp_1k) > 0 and syndicate_exp_1k[0] != None:
syndicate_data_1k[expfile] = eval_dict( syndicate_exp_1k[0][0] )
else:
intersect = False
if syndicate_exp_1M != None and len(syndicate_exp_1M) > 0 and syndicate_exp_1M[0] != None:
syndicate_data_1M[expfile] = eval_dict( syndicate_exp_1M[0][0] )
else:
intersect = False
if syndicate_exp_50M != None and len(syndicate_exp_50M) > 0 and syndicate_exp_50M[0] != None:
syndicate_data_50M[expfile] = eval_dict( syndicate_exp_50M[0][0] )
else:
intersect = False
if s3_exp_20k != None and len(s3_exp_20k) > 0 and s3_exp_20k[0] != None:
s3_data_20k[expfile] = eval_dict( s3_exp_20k[0][0] )
else:
intersect = False
if s3_exp_50M != None and len(s3_exp_50M) > 0 and s3_exp_50M[0] != None:
s3_data_50M[expfile] = eval_dict( s3_exp_50M[0][0] )
else:
intersect = False
"""
if s3_exp_100blk != None and len(s3_exp_100blk) > 0 and s3_exp_100blk[0] != None:
s3_data_100blk[expfile] = eval_dict( s3_exp_100blk[0][0] )
else:
intersect = False
if plc_exp_100blk != None and len(plc_exp_100blk) > 0 and plc_exp_100blk[-1] != None:
plc_data_100blk[expfile] = eval_dict( plc_exp_100blk[-1][0] )
else:
intersect = False
if s3_exp_100blk_nocache != None and len(s3_exp_100blk_nocache) > 0 and s3_exp_100blk_nocache[-1] != None:
s3_data_100blk_nocache[expfile] = eval_dict( s3_exp_100blk_nocache[-1][0] )
else:
intersect = False
if syndicate_exp_100blk != None and len(syndicate_exp_100blk) > 0 and syndicate_exp_100blk[-1] != None:
syndicate_data_100blk[expfile] = eval_dict( syndicate_exp_100blk[-1][0] )
else:
intersect = False
if intersect:
intersection.append( expfile )
for expfile in os.listdir( sys.argv[1] ):
if expfile not in intersection:
print >> sys.stderr, "Node %s did not pass all tests" % expfile
print >> sys.stderr, "%s nodes have data" % len(intersection)
syndicate = { 'first_1k': [], 'last_1k': [], 'first_1m': [], 'last_1m': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [] }
s3 = { 'first_20k': [], 'last_20k': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [], 'first_100blk_nocache': [], 'last_100blk_nocache': [] }
plc = {'first_100blk' : [], 'last_100blk': [] }
num_valid = 0
slow = []
for node in intersection:
valid = True
#data_list = [("syndicate 1k", syndicate_data_1k), ("syndicate 1M", syndicate_data_1M), ("syndicate 50M", syndicate_data_50M), ("S3 20k", s3_data_20k), ("S3 50M", s3_data_50M), ("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk)]
data_list = [("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk), ("S3 nocache 100blk", s3_data_100blk_nocache), ("Syndicate 100blk", syndicate_data_100blk)]
for (data_name, data) in data_list:
if data.get(node) == None:
print >> sys.stderr, "%s: no data for %s" % (node, data_name)
valid = False
elif data[node] == None:
print >> sys.stderr, "%s: unparseable data" % (node, data_name)
valid = False
elif len(data[node]['exception']) > 0:
print >> sys.stderr, "%s: exceptions on %s" % (node, data_name)
valid = False
if not valid:
            continue
"""
syndicate['first_1k'].append( syndicate_data_1k[node]['end_recv'][0] - syndicate_data_1k[node]['start_recv'][0] )
syndicate['last_1k'].append( syndicate_data_1k[node]['end_recv'][-1] - syndicate_data_1k[node]['start_recv'][-1] )
syndicate['first_1m'].append( syndicate_data_1M[node]['end_recv'][0] - syndicate_data_1M[node]['start_recv'][0] )
syndicate['last_1m'].append( syndicate_data_1M[node]['end_recv'][-1] - syndicate_data_1M[node]['start_recv'][-1] )
syndicate['first_50m'].append( syndicate_data_50M[node]['end_recv'][0] - syndicate_data_50M[node]['start_recv'][0] )
syndicate['last_50m'].append( syndicate_data_50M[node]['end_recv'][-1] - syndicate_data_50M[node]['start_recv'][-1] )
s3['first_20k'].append( s3_data_20k[node]['end_recv'][0] - s3_data_20k[node]['start_recv'][0] )
s3['last_20k'].append( s3_data_20k[node]['end_recv'][-1] - s3_data_20k[node]['start_recv'][-1] )
s3['first_50m'].append( s3_data_50M[node]['end_recv'][0] - s3_data_50M[node]['start_recv'][0] )
s3['last_50m'].append( s3_data_50M[node]['end_recv'][-1] - s3_data_50M[node]['start_recv'][-1] )
"""
s3['first_100blk'].append( s3_data_100blk[node]['end_recv'][0] - s3_data_100blk[node]['start_recv'][0])
s3['last_100blk'].append( s3_data_100blk[node]['end_recv'][-1] - s3_data_100blk[node]['start_recv'][-1])
s3['first_100blk_nocache'].append( s3_data_100blk_nocache[node]['end_recv'][0] - s3_data_100blk_nocache[node]['start_recv'][0] )
plc['first_100blk'].append( plc_data_100blk[node]['end_recv'][0] - plc_data_100blk[node]['start_recv'][0])
plc['last_100blk'].append( plc_data_100blk[node]['end_recv'][-1] - plc_data_100blk[node]['start_recv'][-1])
syndicate['first_100blk'].append( syndicate_data_100blk[node]['end_recv'][0] - syndicate_data_100blk[node]['start_recv'][0] )
syndicate['last_100blk'].append( syndicate_data_100blk[node]['end_recv'][-1] - syndicate_data_100blk[node]['start_recv'][-1] )
if syndicate['first_100blk'][-1] > 150:
slow.append( node )
num_valid += 1
#print "s3_first_100blk = " + str(s3['first_100blk'])
#print "s3_last_100blk = " + str(s3['last_100blk'])
print "valid: " + str(num_valid)
print "slow: \n" + "\n".join(slow)
# first 1K vs last 1K
cdf_compare( [syndicate['first_100blk'], syndicate['last_100blk'], plc['first_100blk'] ], "Syndicate One-Writer-Many-Reader Download Times", "Seconds", np.arange(0, 1000, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["Syndicate 0% Cache Hit", "Syndicate 100% Cache Hit", "Python HTTP Server and Clients"] )
#cdf_compare( [plc['first_100blk'], s3['first_100blk']], "Amazon S3 vs PLC Cache Miss Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
cdf_compare( [s3['first_100blk'], s3['first_100blk_nocache']], "Amazon S3 Cache and Direct Download Times", "Seconds", np.arange(0, 1200, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["0% hit cache hit rate", "Direct Download"] )
cdf_compare( [s3['first_100blk'], s3['last_100blk']], "Amazon S3 Cache Miss and Cache Hit Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
    # The 1k/50M distributions are only populated by the experiments commented
    # out above, so these comparisons are disabled rather than crashing on
    # empty data.
    #cdf_compare( [syndicate['first_1k'], syndicate['last_1k']] )
    #cdf_compare( [syndicate['first_50m'], s3['first_50m']] )
    #cdf_compare( [syndicate['last_50m'], s3['last_50m']] )
#cdf_compare( [syndicate['last_1m'], s3['last_20k']] )
|
|
# Copyright (c) 2010-2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
import sys
from m5.objects import *
from Benchmarks import *
from m5.util import convert, fatal
class CowIdeDisk(IdeDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
class MemBus(CoherentBus):
badaddr_responder = BadAddr()
default = Self.badaddr_responder.pio
def makeLinuxAlphaSystem(mem_mode, mdesc = None):
IO_address_space_base = 0x80000000000
class BaseTsunami(Tsunami):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxAlphaSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentBus()
self.membus = MemBus()
# By default the bridge responds to all addresses above the I/O
# base address (including the PCI config space)
self.bridge = Bridge(delay='50ns', nack_delay='4ns',
ranges = [AddrRange(IO_address_space_base, Addr.max)])
self.physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.tsunami = BaseTsunami()
self.tsunami.attachIO(self.iobus)
self.tsunami.ide.pio = self.iobus.master
self.tsunami.ide.config = self.iobus.master
self.tsunami.ide.dma = self.iobus.slave
self.tsunami.ethernet.pio = self.iobus.master
self.tsunami.ethernet.config = self.iobus.master
self.tsunami.ethernet.dma = self.iobus.slave
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('vmlinux_2.6.27')
self.pal = binary('ts_osfpal')
self.console = binary('console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
self.system_port = self.membus.slave
return self
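# A hedged usage sketch (the SysConfig arguments and the 'atomic' mode string
# are illustrative; real config scripts typically build these via Options.py):
#
#   test_sys = makeLinuxAlphaSystem('atomic', SysConfig(disk='linux-latest.img'))
#   root = Root(full_system=True, system=test_sys)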
def makeLinuxAlphaRubySystem(mem_mode, mdesc = None):
class BaseTsunami(Tsunami):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
    if not mdesc:
        # generic system
        mdesc = SysConfig()
    # resolve mdesc before using it to size physical memory
    physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
    self = LinuxAlphaSystem(physmem = physmem)
    self.readfile = mdesc.script()
# Create pio bus to connect all device pio ports to rubymem's pio port
self.piobus = NoncoherentBus()
#
    # Pio functional accesses from devices need direct access to memory.
    # RubyPort currently does not support functional accesses, so provide
    # the piobus a direct connection to physical memory
#
self.piobus.master = physmem.port
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.tsunami = BaseTsunami()
self.tsunami.attachIO(self.piobus)
self.tsunami.ide.pio = self.piobus.master
self.tsunami.ide.config = self.piobus.master
self.tsunami.ethernet.pio = self.piobus.master
self.tsunami.ethernet.config = self.piobus.master
#
# Store the dma devices for later connection to dma ruby ports.
# Append an underscore to dma_devices to avoid the SimObjectVector check.
#
self._dma_ports = [self.tsunami.ide.dma, self.tsunami.ethernet.dma]
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('vmlinux_2.6.27')
self.pal = binary('ts_osfpal')
self.console = binary('console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
return self
def makeSparcSystem(mem_mode, mdesc = None):
# Constants from iob.cc and uart8250.cc
iob_man_addr = 0x9800000000
uart_pio_size = 8
class CowMmDisk(MmDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
def childImage(self, ci):
self.image.child.image_file = ci
self = SparcSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentBus()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns', nack_delay='4ns')
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)
self.physmem = SimpleMemory(range = AddrRange(Addr('1MB'), size = '64MB'),
zero = True)
self.physmem2 = SimpleMemory(range = AddrRange(Addr('2GB'), size ='256MB'),
zero = True)
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
self.physmem2.port = self.membus.master
self.rom.port = self.membus.master
self.nvram.port = self.membus.master
self.hypervisor_desc.port = self.membus.master
self.partition_desc.port = self.membus.master
self.intrctrl = IntrControl()
self.disk0 = CowMmDisk()
self.disk0.childImage(disk('disk.s10hw2'))
self.disk0.pio = self.iobus.master
# The puart0 and hvuart are placed on the IO bus, so create ranges
# for them. The remaining IO range is rather fragmented, so poke
# holes for the iob and partition descriptors etc.
self.bridge.ranges = \
[
AddrRange(self.t1000.puart0.pio_addr,
self.t1000.puart0.pio_addr + uart_pio_size - 1),
AddrRange(self.disk0.pio_addr,
self.t1000.fake_jbi.pio_addr +
self.t1000.fake_jbi.pio_size - 1),
AddrRange(self.t1000.fake_clk.pio_addr,
iob_man_addr - 1),
AddrRange(self.t1000.fake_l2_1.pio_addr,
self.t1000.fake_ssi.pio_addr +
self.t1000.fake_ssi.pio_size - 1),
AddrRange(self.t1000.hvuart.pio_addr,
self.t1000.hvuart.pio_addr + uart_pio_size - 1)
]
self.reset_bin = binary('reset_new.bin')
self.hypervisor_bin = binary('q_new.bin')
self.openboot_bin = binary('openboot_new.bin')
self.nvram_bin = binary('nvram1')
self.hypervisor_desc_bin = binary('1up-hv.bin')
self.partition_desc_bin = binary('1up-md.bin')
self.system_port = self.membus.slave
return self
def makeArmSystem(mem_mode, machine_type, mdesc = None, bare_metal=False):
assert machine_type
if bare_metal:
self = ArmSystem()
else:
self = LinuxArmSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentBus()
self.membus = MemBus()
self.membus.badaddr_responder.warn_access = "warn"
self.bridge = Bridge(delay='50ns', nack_delay='4ns')
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.mem_mode = mem_mode
if machine_type == "RealView_PBX":
self.realview = RealViewPBX()
elif machine_type == "RealView_EB":
self.realview = RealViewEB()
elif machine_type == "VExpress_ELT":
self.realview = VExpress_ELT()
elif machine_type == "VExpress_EMM":
self.realview = VExpress_EMM()
self.load_addr_mask = 0xffffffff
else:
print "Unknown Machine Type"
sys.exit(1)
self.cf0 = CowIdeDisk(driveID='master')
self.cf0.childImage(mdesc.disk())
# default to an IDE controller rather than a CF one
# assuming we've got one
try:
self.realview.ide.disks = [self.cf0]
except:
self.realview.cf_ctrl.disks = [self.cf0]
if bare_metal:
# EOT character on UART will end the simulation
self.realview.uart.end_on_eot = True
self.physmem = SimpleMemory(range = AddrRange(Addr(mdesc.mem())),
zero = True)
else:
self.kernel = binary('vmlinux.arm.smp.fb.2.6.38.8')
self.machine_type = machine_type
if convert.toMemorySize(mdesc.mem()) > int(self.realview.max_mem_size):
print "The currently selected ARM platforms doesn't support"
print " the amount of DRAM you've selected. Please try"
print " another platform"
sys.exit(1)
boot_flags = 'earlyprintk console=ttyAMA0 lpj=19988480 norandmaps ' + \
'rw loglevel=8 mem=%s root=/dev/sda1' % mdesc.mem()
self.physmem = SimpleMemory(range =
AddrRange(self.realview.mem_start_addr,
size = mdesc.mem()),
conf_table_reported = True)
self.realview.setupBootLoader(self.membus, self, binary)
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
if mdesc.disk().lower().count('android'):
boot_flags += " init=/init "
self.boot_osflags = boot_flags
self.physmem.port = self.membus.master
self.realview.attachOnChipIO(self.membus, self.bridge)
self.realview.attachIO(self.iobus)
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.system_port = self.membus.slave
return self
def makeLinuxMipsSystem(mem_mode, mdesc = None):
class BaseMalta(Malta):
ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0)
ide = IdeController(disks=[Parent.disk0, Parent.disk2],
pci_func=0, pci_dev=0, pci_bus=0)
self = LinuxMipsSystem()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.iobus = NoncoherentBus()
self.membus = MemBus()
self.bridge = Bridge(delay='50ns', nack_delay='4ns')
self.physmem = SimpleMemory(range = AddrRange('1GB'))
self.bridge.master = self.iobus.slave
self.bridge.slave = self.membus.master
self.physmem.port = self.membus.master
self.disk0 = CowIdeDisk(driveID='master')
self.disk2 = CowIdeDisk(driveID='master')
self.disk0.childImage(mdesc.disk())
self.disk2.childImage(disk('linux-bigswap2.img'))
self.malta = BaseMalta()
self.malta.attachIO(self.iobus)
self.malta.ide.pio = self.iobus.master
self.malta.ide.config = self.iobus.master
self.malta.ide.dma = self.iobus.slave
self.malta.ethernet.pio = self.iobus.master
self.malta.ethernet.config = self.iobus.master
self.malta.ethernet.dma = self.iobus.slave
self.simple_disk = SimpleDisk(disk=RawDiskImage(image_file = mdesc.disk(),
read_only = True))
self.intrctrl = IntrControl()
self.mem_mode = mem_mode
self.terminal = Terminal()
self.kernel = binary('mips/vmlinux')
self.console = binary('mips/console')
self.boot_osflags = 'root=/dev/hda1 console=ttyS0'
self.system_port = self.membus.slave
return self
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
return IO_address_space_base + port
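# e.g. x86IOAddress(0x3f8) places the COM1 serial port into the dedicated
# x86 IO address space starting at IO_address_space_base.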
def connectX86ClassicSystem(x86_sys, numCPUs):
# Constants similar to x86_traits.hh
IO_address_space_base = 0x8000000000000000
pci_config_address_space_base = 0xc000000000000000
interrupts_address_space_base = 0xa000000000000000
APIC_range_size = 1 << 12;
x86_sys.membus = MemBus()
x86_sys.physmem.port = x86_sys.membus.master
# North Bridge
x86_sys.iobus = NoncoherentBus()
x86_sys.bridge = Bridge(delay='50ns', nack_delay='4ns')
x86_sys.bridge.master = x86_sys.iobus.slave
x86_sys.bridge.slave = x86_sys.membus.master
# Allow the bridge to pass through the IO APIC (two pages),
# everything in the IO address range up to the local APIC, and
# then the entire PCI address space and beyond
x86_sys.bridge.ranges = \
[
AddrRange(x86_sys.pc.south_bridge.io_apic.pio_addr,
x86_sys.pc.south_bridge.io_apic.pio_addr +
APIC_range_size - 1),
AddrRange(IO_address_space_base,
interrupts_address_space_base - 1),
AddrRange(pci_config_address_space_base,
Addr.max)
]
# Create a bridge from the IO bus to the memory bus to allow access to
# the local APIC (two pages)
x86_sys.apicbridge = Bridge(delay='50ns', nack_delay='4ns')
x86_sys.apicbridge.slave = x86_sys.iobus.master
x86_sys.apicbridge.master = x86_sys.membus.slave
x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
interrupts_address_space_base +
numCPUs * APIC_range_size
- 1)]
# connect the io bus
x86_sys.pc.attachIO(x86_sys.iobus)
x86_sys.system_port = x86_sys.membus.slave
def connectX86RubySystem(x86_sys):
# North Bridge
x86_sys.piobus = NoncoherentBus()
#
    # Pio functional accesses from devices need direct access to memory.
    # RubyPort currently does not support functional accesses, so provide
    # the piobus a direct connection to physical memory
#
x86_sys.piobus.master = x86_sys.physmem.port
# add the ide to the list of dma devices that later need to attach to
# dma controllers
x86_sys._dma_ports = [x86_sys.pc.south_bridge.ide.dma]
x86_sys.pc.attachIO(x86_sys.piobus, x86_sys._dma_ports)
def makeX86System(mem_mode, numCPUs = 1, mdesc = None, self = None, Ruby = False):
    if self is None:
self = X86System()
if not mdesc:
# generic system
mdesc = SysConfig()
self.readfile = mdesc.script()
self.mem_mode = mem_mode
# Physical memory
self.physmem = SimpleMemory(range = AddrRange(mdesc.mem()))
# Platform
self.pc = Pc()
# Create and connect the busses required by each memory system
if Ruby:
connectX86RubySystem(self)
else:
connectX86ClassicSystem(self, numCPUs)
self.intrctrl = IntrControl()
# Disks
disk0 = CowIdeDisk(driveID='master')
disk2 = CowIdeDisk(driveID='master')
disk0.childImage(mdesc.disk())
disk2.childImage(disk('linux-bigswap2.img'))
self.pc.south_bridge.ide.disks = [disk0, disk2]
# Add in a Bios information structure.
structures = [X86SMBiosBiosInformation()]
self.smbios_table.structures = structures
# Set up the Intel MP table
base_entries = []
ext_entries = []
for i in xrange(numCPUs):
bp = X86IntelMPProcessor(
local_apic_id = i,
local_apic_version = 0x14,
enable = True,
bootstrap = (i == 0))
base_entries.append(bp)
io_apic = X86IntelMPIOAPIC(
id = numCPUs,
version = 0x11,
enable = True,
address = 0xfec00000)
self.pc.south_bridge.io_apic.apic_id = io_apic.id
base_entries.append(io_apic)
isa_bus = X86IntelMPBus(bus_id = 0, bus_type='ISA')
base_entries.append(isa_bus)
pci_bus = X86IntelMPBus(bus_id = 1, bus_type='PCI')
base_entries.append(pci_bus)
connect_busses = X86IntelMPBusHierarchy(bus_id=0,
subtractive_decode=True, parent_bus=1)
ext_entries.append(connect_busses)
pci_dev4_inta = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 1,
source_bus_irq = 0 + (4 << 2),
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 16)
base_entries.append(pci_dev4_inta)
def assignISAInt(irq, apicPin):
assign_8259_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'ExtInt',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = 0)
base_entries.append(assign_8259_to_apic)
assign_to_apic = X86IntelMPIOIntAssignment(
interrupt_type = 'INT',
polarity = 'ConformPolarity',
trigger = 'ConformTrigger',
source_bus_id = 0,
source_bus_irq = irq,
dest_io_apic_id = io_apic.id,
dest_io_apic_intin = apicPin)
base_entries.append(assign_to_apic)
assignISAInt(0, 2)
assignISAInt(1, 1)
for i in range(3, 15):
assignISAInt(i, i)
self.intel_mp_table.base_entries = base_entries
self.intel_mp_table.ext_entries = ext_entries
def makeLinuxX86System(mem_mode, numCPUs = 1, mdesc = None, Ruby = False):
self = LinuxX86System()
# Build up the x86 system and then specialize it for Linux
makeX86System(mem_mode, numCPUs, mdesc, self, Ruby)
# We assume below that there's at least 1MB of memory. We'll require 2
# just to avoid corner cases.
assert(self.physmem.range.second.getValue() >= 0x200000)
self.e820_table.entries = \
[
# Mark the first megabyte of memory as reserved
X86E820Entry(addr = 0, size = '1MB', range_type = 2),
# Mark the rest as available
X86E820Entry(addr = 0x100000,
size = '%dB' % (self.physmem.range.second - 0x100000 + 1),
range_type = 1)
]
# Command line
self.boot_osflags = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 ' + \
'root=/dev/hda1'
return self
def makeDualRoot(full_system, testSystem, driveSystem, dumpfile):
self = Root(full_system = full_system)
self.testsys = testSystem
self.drivesys = driveSystem
    self.etherlink = EtherLink()
if hasattr(testSystem, 'realview'):
self.etherlink.int0 = Parent.testsys.realview.ethernet.interface
self.etherlink.int1 = Parent.drivesys.realview.ethernet.interface
elif hasattr(testSystem, 'tsunami'):
self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface
self.etherlink.int1 = Parent.drivesys.tsunami.ethernet.interface
else:
fatal("Don't know how to connect these system together")
if dumpfile:
self.etherdump = EtherDump(file=dumpfile)
self.etherlink.dump = Parent.etherdump
return self
|
|
#!/usr/bin/env python
import codecs, csv, json, shlex, struct, subprocess, sys
json_data = {
'groups': {},
'root': { 'g': 0, 'o': 0, 'oidx': 0, 'sa': 0, 'da': 0, 'ia': 0, 'out': []},
'nodes': [],
}
stats = dict()
with open('./Stats.csv', 'r') as f:
data = csv.DictReader(f)
for row in data:
stats[int(row['Rows'])] = row
stat_descriptions = dict()
def description_file_lines(filename):
with open(filename, 'rb') as f:
contents = f.read()
binary_lines = contents.split(b'\x0A\x00')
lines = []
for line in binary_lines:
try:
lines.append(line.decode('utf-16-le').encode('ascii').decode('ascii').strip())
except:
pass
return lines
stat_description_lines = description_file_lines('./stat_descriptions.txt')
stat_description_lines += description_file_lines('./passive_skill_stat_descriptions.txt')
# walk the file with an explicit cursor so that lines consumed as part of a
# description block are not re-scanned by the outer loop
i = 0
while i < len(stat_description_lines):
    if stat_description_lines[i] == 'description':
        i += 1
        parameters = stat_description_lines[i].split()[1:]
        i += 1
        variants = []
        variant_count = int(stat_description_lines[i].split()[0])
        for j in range(variant_count):
            i += 1
            variants.append(shlex.split(stat_description_lines[i]))
        stat_descriptions[tuple(parameters)] = {
            'parameters': parameters,
            'variants': variants,
        }
    i += 1
def parameter_match(value_range, n):
    # a range is '#' (wildcard), a single value, or 'lo|hi' with '#' allowed
    # on either side; avoid shadowing the builtin range
    split = value_range.split('|')
    if len(split) == 1:
        return split[0] == '#' or int(split[0]) == int(n)
    return (split[0] == '#' or int(split[0]) <= int(n)) and (split[1] == '#' or int(split[1]) >= int(n))
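# Illustration of the range grammar handled above:
#   parameter_match('10|20', 15) -> True
#   parameter_match('#', 999)    -> True   ('#' is a wildcard)
#   parameter_match('5', 4)      -> False  (single values must match exactly)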
def stat_text(stat_values):
ret = []
for description in stat_descriptions.values():
parameter_values = []
should_add_description = False
for parameter in description['parameters']:
if parameter in stat_values and stat_values[parameter] != '0':
parameter_values.append(stat_values[parameter])
should_add_description = True
else:
parameter_values.append('0')
if should_add_description:
for variant in description['variants']:
should_use = True
for i in range(len(parameter_values)):
if not parameter_match(variant[i], parameter_values[i]):
should_use = False
break
if not should_use:
continue
if len(variant) > len(parameter_values) + 2:
transformation = variant[len(parameter_values) + 1]
parameter = int(variant[len(parameter_values) + 2]) - 1
if transformation == 'per_minute_to_per_second':
parameter_values[parameter] = str(float(parameter_values[parameter]) / 60)
elif transformation == 'milliseconds_to_seconds':
parameter_values[parameter] = str(float(parameter_values[parameter]) / 1000)
elif transformation == 'divide_by_one_hundred':
parameter_values[parameter] = str(float(parameter_values[parameter]) / 100)
elif transformation == 'negate':
parameter_values[parameter] = str(float(parameter_values[parameter]) * -1)
else:
raise ValueError('unknown transformation', transformation)
ret.append(subprocess.check_output(['./boost-formatter', variant[len(parameter_values)]] + parameter_values).decode('ascii'))
break
return ret
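# For example, stat_values == {'base_strength': '10'} would select the matching
# description variant and render its text through the external boost-formatter
# helper; the exact wording comes from stat_descriptions.txt, not this script.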
passive_skills = dict()
with open('./PassiveSkills.csv', 'r') as f:
data = csv.DictReader(f)
for row in data:
passive_skills[int(row['Unknown8'])] = row
stat_ids = json.loads(row['Data0'])
stat_values = dict()
for i in range(len(stat_ids)):
stat = stats[stat_ids[i]]
stat_values[stat['Id']] = row['Stat%d' % (i + 1)]
node_json_data = {
'id': int(row['Unknown8']),
'icon': row['Icon'].replace('.dds', '.png'),
'ks': row['IsKeystone'] == 'True',
'not': row['IsNotable'] == 'True',
'dn': row['Name'],
'm': row['IsJustIcon'] == 'True',
's': row['IsSocket'] == 'True',
'spc': json.loads(row['Data1']),
'sd': stat_text(stat_values),
'sa': stat_values['base_strength'] if 'base_strength' in stat_values else 0,
'da': stat_values['base_dexterity'] if 'base_dexterity' in stat_values else 0,
'ia': stat_values['base_intelligence'] if 'base_intelligence' in stat_values else 0,
'out': []
}
json_data['nodes'].append(node_json_data)
with open('./PassiveSkillGraph.psg', 'rb') as f:
graph_data = f.read()
graph_position = 0
def read_graph_byte():
    global graph_data, graph_position
    # slice (not index) so this also works when graph_data is py3 bytes
    ret = struct.unpack('<B', graph_data[graph_position:graph_position+1])[0]
    graph_position += 1
    return ret
def read_graph_word():
global graph_data, graph_position
ret = struct.unpack('<I', graph_data[graph_position:graph_position+4])[0]
graph_position += 4
return ret
def read_graph_float():
global graph_data, graph_position
ret = struct.unpack('f', graph_data[graph_position:graph_position+4])[0]
graph_position += 4
return ret
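# Layout of the .psg blob as decoded below (reverse-engineered, so the field
# names are informal): a version byte, a byte-counted run of unknown bytes,
# a word-counted list of root node ids, then word-counted groups of
# (x, y, passives), each passive being (id, orbit radius, orbit position,
# connection count, connected node ids...).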
if read_graph_byte() != 2:
print('invalid graph version')
sys.exit(1)
unknown_count = read_graph_byte()
for i in range(unknown_count):
unknown = read_graph_byte()
root_count = read_graph_word()
for i in range(root_count):
root_id = read_graph_word()
json_data['root']['out'].append(root_id)
group_count = read_graph_word()
for i in range(group_count):
x = read_graph_float()
y = read_graph_float()
group_json_data = {
'x': x, 'y': y,
'oo': {},
'n': [],
}
passive_count = read_graph_word()
for j in range(passive_count):
skill_id = read_graph_word()
skill = passive_skills[skill_id]
orbit_radius = read_graph_word()
orbit_position = read_graph_word()
connections = read_graph_word()
node_json_data = json_data['nodes'][int(skill['Rows'])]
for k in range(connections):
node_json_data['out'].append(read_graph_word())
node_json_data['g'] = i + 1
node_json_data['o'] = orbit_radius
node_json_data['oidx'] = orbit_position
group_json_data['n'].append(skill_id)
group_json_data['oo'][orbit_radius] = True
json_data['groups'][i + 1] = group_json_data
# post-processing - ideally, we wouldn't need anything below this line
with open('./merge.json', 'r') as f:
json_data.update(json.loads(f.read()))
for node in json_data['nodes']:
# make sure our starting class ids match the data we merged
# TODO: generate characterData and constants['classes'] instead
if node['dn'] == 'SIX':
node['spc'][0] = json_data['constants']['classes']['DexIntClass']
elif node['dn'] == 'Seven':
node['spc'][0] = json_data['constants']['classes']['StrDexIntClass']
elif node['dn'] == 'MARAUDER':
node['spc'][0] = json_data['constants']['classes']['StrClass']
elif node['dn'] == 'TEMPLAR':
node['spc'][0] = json_data['constants']['classes']['StrIntClass']
elif node['dn'] == 'WITCH':
node['spc'][0] = json_data['constants']['classes']['IntClass']
elif node['dn'] == 'DUELIST':
node['spc'][0] = json_data['constants']['classes']['StrDexClass']
elif node['dn'] == 'RANGER':
node['spc'][0] = json_data['constants']['classes']['DexClass']
# replace images we don't have with a placeholder
has_texture = False
if node['ks']:
for image in json_data['skillSprites']['keystoneActive']:
if node['icon'] in image['coords']:
has_texture = True
break
if not has_texture:
node['icon'] = 'Art/2DArt/SkillIcons/passives/KeystonePainAttunement.png'
elif node['not']:
for image in json_data['skillSprites']['notableActive']:
if node['icon'] in image['coords']:
has_texture = True
break
if not has_texture:
node['icon'] = 'Art/2DArt/SkillIcons/passives/savant.png'
elif node['m']:
for image in json_data['skillSprites']['mastery']:
if node['icon'] in image['coords']:
has_texture = True
break
if not has_texture:
node['icon'] = 'Art/2DArt/SkillIcons/passives/MasteryGroupMace.png'
elif node['s']:
node['icon'] = 'Art/2DArt/SkillIcons/passives/blank.png'
node['sd'] = ['1 Jewel Socket']
node['not'] = True
else:
for image in json_data['skillSprites']['normalActive']:
if node['icon'] in image['coords']:
has_texture = True
break
if not has_texture:
node['icon'] = 'Art/2DArt/SkillIcons/passives/chargestr.png'
for image in json_data['skillSprites']['notableActive']:
# make sockets look like blank notables
if image['filename'] == 'skill_sprite-active-3-62ca5131ab27a2029a98e71a1adc1ef3.jpg':
image['coords']['Art/2DArt/SkillIcons/passives/blank.png'] = {
"x": 418,
"y": 618,
"w": 38,
"h": 38,
}
break
print(json.dumps(json_data))
|
|
from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import date, datetime, timedelta
from psycopg2.extensions import AsIs, Binary, QuotedString
from pytz import timezone
def _get_total_seconds(td):
    """timedelta.total_seconds() equivalent, used below; the builtin is
    unavailable before Python 2.7."""
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6
class PostgresWriter(object):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def __init__(self, index_prefix, tz=False):
self.column_types = {}
self.index_prefix = index_prefix if index_prefix else ''
if tz:
self.tz = timezone('UTC')
self.tz_offset = '+00:00'
else:
self.tz = None
self.tz_offset = ''
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
hash_key = hash(frozenset(column.items()))
self.column_types[hash_key] = self.column_type_info(column).split(" ")[0]
return self.column_types[hash_key]
    def column_type_info(self, column):
        """Map a MySQL column description to the PostgreSQL column type,
        DEFAULT clause and NULL constraint used in CREATE TABLE.
        """
null = "" if column['null'] else " NOT NULL"
def get_type(column):
"""This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
determines the PostgreSQL data type. In my opinion this is way too fugly, will need
to refactor one day.
"""
            t = lambda v: v is not None
default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
if column['type'] == 'char':
default = ('%s::char' % default) if t(default) else None
return default, 'character(%s)' % column['length']
elif column['type'] == 'varchar':
default = ('%s::character varying' % default) if t(default) else None
return default, 'character varying(%s)' % column['length']
elif column['type'] == 'integer':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'integer'
elif column['type'] == 'bigint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'bigint'
elif column['type'] == 'tinyint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'smallint'
elif column['type'] == 'boolean':
default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
return default, 'boolean'
elif column['type'] == 'float':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] == 'float unsigned':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] in ('numeric', 'decimal'):
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
elif column['type'] == 'double precision':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'double precision'
elif column['type'] == 'datetime' or column['type'].startswith('datetime('):
default = None
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'date':
default = None
return default, 'date'
elif column['type'] == 'timestamp':
                if column['default'] is None:
default = None
elif "current_timestamp()" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
elif "CURRENT_TIMESTAMP" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
elif "0000-00-00 00:00" in column['default']:
if self.tz:
default = " DEFAULT '1970-01-01T00:00:00.000000%s'" % self.tz_offset
elif "0000-00-00 00:00:00" in column['default']:
default = " DEFAULT '1970-01-01 00:00:00'"
else:
default = " DEFAULT '1970-01-01 00:00'"
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'time' or column['type'].startswith('time('):
default = " DEFAULT NOW()" if t(default) else None
if self.tz:
return default, 'time with time zone'
else:
return default, 'time without time zone'
elif column['type'] in ('blob', 'binary', 'longblob', 'mediumblob', 'tinyblob', 'varbinary'):
return default, 'bytea'
elif column['type'].startswith('binary(') or column['type'].startswith('varbinary('):
return default, 'bytea'
elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
return default, 'text'
elif column['type'].startswith('enum'):
default = (' %s::character varying' % default) if t(default) else None
enum = re.sub(r'^enum\(|\)$', '', column['type'])
# TODO: will work for "'.',',',''''" but will fail for "'.'',','.'"
max_enum_size = max([len(e.replace("''", "'")) for e in enum.split("','")])
return default, ' character varying(%s) check("%s" in (%s))' % (max_enum_size, column['name'], enum)
elif column['type'].startswith('bit('):
return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
elif column['type'].startswith('set('):
if default:
default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(
v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
return default, 'text[]'
else:
raise Exception('unknown %s' % column['type'])
default, column_type = get_type(column)
if column.get('auto_increment', None):
return '%s DEFAULT nextval(\'"%s_%s_seq"\'::regclass) NOT NULL' % (
column_type, column['table_name'], column['name'])
        return '%s%s%s' % (column_type, (default if default is not None else ''), null)
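    # For illustration, a hypothetical column dict from the MySQL reader such as
    #   {'name': 'id', 'type': 'integer', 'length': 11, 'null': False,
    #    'default': None, 'auto_increment': True, 'table_name': 'users'}
    # comes back from column_type_info() as:
    #   integer DEFAULT nextval('"users_id_seq"'::regclass) NOT NULL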
def table_comments(self, table):
comments = []
if table.comment:
comments.append('COMMENT ON TABLE %s is %s;' % (table.name, QuotedString(table.comment).getquoted()))
for column in table.columns:
if column['comment']:
comments.append('COMMENT ON COLUMN %s.%s is %s;' % (table.name, column['name'], QuotedString(column['comment']).getquoted()))
return comments
def process_row(self, table, row):
"""Examines row data from MySQL and alters
the values when necessary to be compatible with
sending to PostgreSQL via the copy command
"""
for index, column in enumerate(table.columns):
hash_key = hash(frozenset(column.items()))
column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)
            if row[index] is None and ('timestamp' not in column_type or not column['default']):
                row[index] = r'\N'
            elif row[index] is None and column['default']:
if self.tz:
row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset
else:
row[index] = '1970-01-01 00:00:00'
elif 'bit' in column_type:
row[index] = bin(ord(row[index]))[2:]
elif isinstance(row[index], (str, unicode, basestring)):
if column_type == 'bytea':
row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
elif 'text[' in column_type:
row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
else:
row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace(
'\t', r'\t').replace('\r', r'\r').replace('\0', '')
elif column_type == 'boolean':
                # We got here because you used a tinyint(1); if you didn't want a bool, don't use that type.
row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]
elif isinstance(row[index], (date, datetime)):
if isinstance(row[index], datetime) and self.tz:
try:
if row[index].tzinfo:
row[index] = row[index].astimezone(self.tz).isoformat()
else:
row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()
except Exception as e:
print e.message
else:
row[index] = row[index].isoformat()
elif isinstance(row[index], timedelta):
row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()
else:
row[index] = AsIs(row[index]).getquoted()
def table_attributes(self, table):
primary_keys = []
serial_key = None
maxval = None
columns = StringIO()
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
if column['primary_key']:
primary_keys.append(column['name'])
columns.write(' %s,\n' % self.column_description(column))
return primary_keys, serial_key, maxval, columns.getvalue()[:-2]
def truncate(self, table):
serial_key = None
maxval = None
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
serial_key_sql = None
if serial_key:
serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
'table_name': QuotedString('"%s"' % table.name).getquoted(),
'serial_key': QuotedString(serial_key).getquoted(),
'maxval': maxval}
return (truncate_sql, serial_key_sql)
def write_table(self, table):
primary_keys, serial_key, maxval, columns = self.table_attributes(table)
serial_key_sql = []
table_sql = []
if serial_key:
serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
serial_key_sql.append('DROP SEQUENCE IF EXISTS "%s" CASCADE;' % serial_key_seq)
serial_key_sql.append("""CREATE SEQUENCE "%s" INCREMENT BY 1
NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
serial_key_sql.append('SELECT pg_catalog.setval(\'"%s"\', %s, true);' % (serial_key_seq, maxval))
table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name.encode('utf8'), columns))
table_sql.extend(self.table_comments(table))
return (table_sql, serial_key_sql)
def write_indexes(self, table):
index_sql = []
primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
index_prefix = self.index_prefix
if primary_index:
index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
'table_name': table.name,
'index_name': '%s%s_%s' % (index_prefix, table.name,
'_'.join(primary_index[0]['columns'])),
'column_names': ', '.join('"%s"' % col for col in primary_index[0]['columns']),
})
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
index_name = '%s%s_%s' % (index_prefix, table.name, '_'.join(index['columns']))
index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
'unique': unique,
'index_name': index_name,
'table_name': table.name,
'column_names': ', '.join('"%s"' % col for col in index['columns']),
})
return index_sql
def write_constraints(self, table):
constraint_sql = []
for key in table.foreign_keys:
constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
REFERENCES "%(ref_table_name)s"(%(ref_column_name)s);""" % {
'table_name': table.name,
'column_name': key['column'],
'ref_table_name': key['ref_table'],
'ref_column_name': key['ref_column']})
return constraint_sql
def write_triggers(self, table):
trigger_sql = []
for key in table.triggers:
trigger_sql.append("""CREATE OR REPLACE FUNCTION %(fn_trigger_name)s RETURNS TRIGGER AS $%(trigger_name)s$
BEGIN
%(trigger_statement)s
RETURN NULL;
END;
$%(trigger_name)s$ LANGUAGE plpgsql;""" % {
'table_name': table.name,
'trigger_time': key['timing'],
'trigger_event': key['event'],
'trigger_name': key['name'],
'fn_trigger_name': 'fn_' + key['name'] + '()',
'trigger_statement': key['statement']})
trigger_sql.append("""CREATE TRIGGER %(trigger_name)s %(trigger_time)s %(trigger_event)s ON %(table_name)s
FOR EACH ROW
EXECUTE PROCEDURE fn_%(trigger_name)s();""" % {
'table_name': table.name,
'trigger_time': key['timing'],
'trigger_event': key['event'],
'trigger_name': key['name']})
return trigger_sql
def close(self):
raise NotImplementedError
def write_contents(self, table, reader):
raise NotImplementedError
# Original fix for Py2.6: https://github.com/mozilla/mozdownload/issues/73
def _get_total_seconds(dt):
# Keep backward compatibility with Python 2.6 which doesn't have this method
    if hasattr(dt, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
|
|
# -*- coding: utf8 -*-
import os
import logging
import multiprocessing
import tensorflow as tf
import numpy as np
from gym import wrappers
from yarll.agents.agent import Agent
from yarll.agents.actorcritic.actor_critic import ActorCriticNetworkDiscrete,\
ActorCriticNetworkDiscreteCNN, ActorCriticNetworkDiscreteCNNRNN, actor_critic_discrete_loss,\
ActorCriticNetworkContinuous, actor_critic_continuous_loss
from yarll.agents.env_runner import EnvRunner
from yarll.misc.utils import discount_rewards, FastSaver
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Don't use scientific notation when printing results
np.set_printoptions(suppress=True)
class A2C(Agent):
"""Advantage Actor Critic"""
def __init__(self, env, monitor_path: str, video: bool = True, **usercfg) -> None:
super(A2C, self).__init__(**usercfg)
self.monitor_path = monitor_path
self.env = wrappers.Monitor(
env,
monitor_path,
force=True,
video_callable=(None if video else False))
self.config.update(dict(
n_iter=100,
gamma=0.99,
learning_rate=0.001,
n_hidden_units=20,
n_hidden_layers=1,
gradient_clip_value=0.5,
n_local_steps=20,
vf_coef=0.5,
entropy_coef=0.01,
loss_reducer="mean",
save_model=False
))
self.config.update(usercfg)
# Only used (and overwritten) by agents that use an RNN
self.initial_features = None
self.ac_net = None # Overwritten by build_networks
self.build_networks()
self.action = self.ac_net.action
self.states = self.ac_net.states
self.actions_taken = self.ac_net.actions_taken
self.advantage = tf.placeholder(tf.float32, [None], name="advantage")
self.ret = tf.placeholder(tf.float32, [None], name="return")
self.actor_loss, self.critic_loss, self.loss = self.make_loss()
self.vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
self._global_step = tf.get_variable(
"global_step",
[],
tf.int32,
initializer=tf.constant_initializer(0, dtype=tf.int32),
trainable=False)
self.optimizer = tf.train.AdamOptimizer(
self.config["learning_rate"], name="optim")
grads = tf.gradients(self.loss, self.vars)
grads, _ = tf.clip_by_global_norm(
grads, self.config["gradient_clip_value"])
# Apply gradients to the weights of the master network
apply_grads = self.optimizer.apply_gradients(zip(grads, self.vars))
self.n_steps = tf.shape(self.states)[0]
inc_step = self._global_step.assign_add(self.n_steps)
self.train_op = tf.group(apply_grads, inc_step)
self.init_op = tf.global_variables_initializer()
# Launch the graph.
num_cpu = multiprocessing.cpu_count()
tf_config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
self.session = tf.Session(config=tf_config)
if self.config["save_model"]:
tf.add_to_collection("action", self.action)
tf.add_to_collection("states", self.states)
self.saver = FastSaver()
n_steps = tf.to_float(self.n_steps)
actor_loss_summary = tf.summary.scalar("model/actor_loss", tf.squeeze(self.actor_loss / n_steps))
critic_loss_summary = tf.summary.scalar("model/critic_loss", tf.squeeze(self.critic_loss / n_steps))
loss_summary = tf.summary.scalar("model/loss", tf.squeeze(self.loss / n_steps))
self.loss_summary_op = tf.summary.merge(
[actor_loss_summary, critic_loss_summary, loss_summary])
self.writer = tf.summary.FileWriter(os.path.join(
self.monitor_path, "summaries"), self.session.graph)
self.env_runner = EnvRunner(self.env, self, usercfg, summary_writer=self.writer)
return
def _initialize(self):
self.session.run(self.init_op)
def build_networks(self):
        raise NotImplementedError("Abstract method")
def make_loss(self):
        raise NotImplementedError("Abstract method")
@property
def global_step(self):
return self._global_step.eval(session=self.session)
def get_critic_value(self, state, features):
return self.session.run([self.ac_net.value], feed_dict={self.states: state})[0].flatten()
def choose_action(self, state, features) -> dict:
action, value = self.session.run(
[self.ac_net.action, self.ac_net.value], feed_dict={self.states: [state]})
return {"action": action, "value": value[0]}
def get_env_action(self, action) -> int:
return np.argmax(action)
def learn(self):
"""Run learning algorithm"""
self._initialize()
config = self.config
for _ in range(int(config["n_iter"])):
            # Collect n_local_steps timesteps of experience
trajectory = self.env_runner.get_steps(int(self.config["n_local_steps"]))
v = 0 if trajectory.terminals[-1] else self.get_critic_value(
np.asarray(trajectory.states)[None, -1], trajectory.features[-1])
rewards_plus_v = np.asarray(trajectory.rewards + [v])
vpred_t = np.asarray(trajectory.values + [v])
delta_t = trajectory.rewards + \
self.config["gamma"] * vpred_t[1:] - vpred_t[:-1]
batch_r = discount_rewards(
rewards_plus_v, self.config["gamma"])[:-1]
batch_adv = discount_rewards(delta_t, self.config["gamma"])
fetches = [self.loss_summary_op, self.train_op, self._global_step]
states = np.asarray(trajectory.states)
feed_dict = {
self.states: states,
self.actions_taken: np.asarray(trajectory.actions),
self.advantage: batch_adv,
self.ret: np.asarray(batch_r)
}
feature = trajectory.features[0]
if feature != [] and feature is not None:
feed_dict[self.ac_net.rnn_state_in] = feature
summary, _, global_step = self.session.run(fetches, feed_dict)
self.writer.add_summary(summary, global_step)
self.writer.flush()
if self.config["save_model"]:
tf.add_to_collection("action", self.action)
tf.add_to_collection("states", self.states)
self.saver.save(self.session, os.path.join(
self.monitor_path, "model"))
class A2CDiscrete(A2C):
def __init__(self, *args, **kwargs):
super(A2CDiscrete, self).__init__(*args, **kwargs)
def build_networks(self):
self.ac_net = ActorCriticNetworkDiscrete(
list(self.env.observation_space.shape),
self.env.action_space.n,
int(self.config["n_hidden_units"]),
int(self.config["n_hidden_layers"]))
def make_loss(self):
return actor_critic_discrete_loss(
self.ac_net.logits,
self.ac_net.probs,
self.ac_net.value,
self.ac_net.actions_taken,
self.advantage,
self.ret,
self.config["vf_coef"],
self.config["entropy_coef"],
self.config["loss_reducer"]
)
class A2CDiscreteCNN(A2CDiscrete):
def build_networks(self):
self.ac_net = ActorCriticNetworkDiscreteCNN(
list(self.env.observation_space.shape),
self.env.action_space.n,
int(self.config["n_hidden_units"]))
class A2CDiscreteCNNRNN(A2CDiscrete):
def build_networks(self):
self.ac_net = ActorCriticNetworkDiscreteCNNRNN(
list(self.env.observation_space.shape),
self.env.action_space.n,
int(self.config["n_hidden_units"]))
self.initial_features = self.ac_net.state_init
def choose_action(self, state, features) -> dict:
"""Choose an action."""
feed_dict = {
self.ac_net.states: [state],
self.ac_net.rnn_state_in: features
}
action, rnn_state, value = self.session.run(
[self.ac_net.action, self.ac_net.rnn_state_out, self.ac_net.value],
feed_dict=feed_dict)
return {"action": action, "value": value[0], "features": rnn_state}
def get_critic_value(self, states, features):
feed_dict = {
self.ac_net.states: states,
self.ac_net.rnn_state_in: features
}
return self.session.run(self.ac_net.value, feed_dict=feed_dict)[0]
class A2CContinuous(A2C):
def __init__(self, *args, **kwargs):
super(A2CContinuous, self).__init__(*args, **kwargs)
def build_networks(self):
self.ac_net = ActorCriticNetworkContinuous(
list(self.env.observation_space.shape),
self.env.action_space,
int(self.config["n_hidden_units"]),
int(self.config["n_hidden_layers"]))
def make_loss(self):
return actor_critic_continuous_loss(
self.ac_net.action_log_prob,
self.ac_net.entropy,
self.ac_net.value,
self.advantage,
self.ret,
self.config["vf_coef"],
self.config["entropy_coef"],
self.config["loss_reducer"]
)
def get_env_action(self, action):
return action
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Container
########################
The :class:`~exa.core.container.Container` class is the primary object for
data processing, analysis, and visualization. In brief, containers are composed
of data objects whose contents are used for 2D and 3D visualization. Containers
also provide some content management and data relationship features.
See Also:
For a description of data objects see :mod:`~exa.core.numerical`.
"""
import os
import numpy as np
import pandas as pd
import networkx as nx
from sys import getsizeof
from copy import deepcopy
from collections import defaultdict
from .numerical import check_key, Field, Series, DataFrame, SparseSeries, SparseDataFrame
from exa.util.utility import convert_bytes
from exa.util import mpl
import matplotlib.pyplot as plt
class Container(object):
"""
Container class responsible for all features related to data management.
"""
_getter_prefix = 'compute'
_cardinal = None # Name of the cardinal data table
def copy(self, name=None, description=None, meta=None):
"""
Create a copy of the current object (may alter the container's name,
description, and update the metadata if needed).
"""
cls = self.__class__
kwargs = self._rel(copy=True)
kwargs.update(self._data(copy=True))
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if meta is not None:
kwargs['meta'] = meta
return cls(**kwargs)
def concat(self, *args, **kwargs):
"""
Concatenate any number of container objects with the current object into
a single container object.
See Also:
For argument description, see :func:`~exa.core.container.concat`.
"""
raise NotImplementedError()
def slice_naive(self, key):
"""
Naively slice each data object in the container by the object's index.
Args:
            key: Int, slice, or list by which to extract a "sub"-container
Returns:
sub: Sub container of the same format with a view of the data
Warning:
To ensure that a new container is created, use the copy method.
.. code-block:: Python
mycontainer[slice].copy()
"""
kwargs = {'name': self.name, 'description': self.description, 'meta': self.meta}
for name, data in self._data().items():
k = name[1:] if name.startswith('_') else name
kwargs[k] = data.slice_naive(key)
return self.__class__(**kwargs)
def slice_cardinal(self, key):
"""
Slice the container according to its (primary) cardinal axis.
The "cardinal" axis can have any name so long as the name matches a
data object attached to the container. The index name for this object
should also match the value of the cardinal axis.
The algorithm builds a network graph representing the data relationships
(including information about the type of relationship) and then traverses
the edge tree (starting from the cardinal table). Each subsequent child
object in the tree is sliced based on its relationship with its parent.
Note:
Breadth first traversal is performed.
Warning:
This function does not make a copy (if possible): to ensure a new
object is created (a copy) use :func:`~exa.core.container.Container.copy`
after slicing.
.. code-block:: Python
myslice = mycontainer[::2].copy()
See Also:
For data network generation, see :func:`~exa.core.container.Container.network`.
For information about relationships between data objects see
:mod:`~exa.core.numerical`.
"""
if self._cardinal:
cls = self.__class__
key = check_key(self[self._cardinal], key, cardinal=True)
g = self.network(fig=False)
kwargs = {self._cardinal: self[self._cardinal].ix[key], 'name': self.name,
'description': self.description, 'meta': self.meta}
# Next traverse, breadth first, all data objects
for parent, child in nx.bfs_edges(g, self._cardinal):
if child in kwargs:
continue
typ = g.edge_types[(parent, child)]
if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'):
kwargs[child] = self[child].slice_cardinal(key)
elif typ == 'index-index':
# Select from the child on the parent's index (the parent is
# in the kwargs already).
kwargs[child] = self[child].ix[kwargs[parent].index.values]
elif typ == 'index-column':
# Select from the child where the column (of the same name as
# the parent) is in the parent's index values
cdf = self[child]
kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)]
                elif typ == 'column-index':
                    # Select from the child where the child's index is in a
                    # column of the parent (the inverse of the 'index-column'
                    # relationship).
cdf = self[child]
cin = cdf.index.name
cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())]
index = kwargs[parent][cols].stack().astype(np.int64).values
kwargs[child] = cdf[cdf.index.isin(index)]
return cls(**kwargs)
def cardinal_groupby(self):
"""
Create an instance of this class for every step in the cardinal dimension.
"""
if self._cardinal:
g = self.network(fig=False)
cardinal_indexes = self[self._cardinal].index.values
selfs = {}
cls = self.__class__
for cardinal_index in cardinal_indexes:
kwargs = {self._cardinal: self[self._cardinal].ix[[cardinal_index]]}
                for parent, child in nx.bfs_edges(g, self._cardinal):
if child in kwargs:
continue
typ = g.edge_types[(parent, child)]
if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'):
                        kwargs[child] = self[child].slice_cardinal([cardinal_index])
elif typ == 'index-index':
# Select from the child on the parent's index (the parent is
# in the kwargs already).
kwargs[child] = self[child].ix[kwargs[parent].index.values]
elif typ == 'index-column':
# Select from the child where the column (of the same name as
# the parent) is in the parent's index values
cdf = self[child]
kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)]
                    elif typ == 'column-index':
                        # Select from the child where the child's index is in a
                        # column of the parent (the inverse of the 'index-column'
                        # relationship).
cdf = self[child]
cin = cdf.index.name
cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())]
index = kwargs[parent][cols].stack().astype(np.int64).values
kwargs[child] = cdf[cdf.index.isin(index)]
selfs[cardinal_index] = cls(**kwargs)
return selfs
def info(self):
"""
Display information about the container's data objects (note that info
on metadata and visualization objects is also provided).
Note:
Sizes are reported in bytes.
"""
names = []
types = []
sizes = []
names.append('WIDGET')
types.append('-')
s = 0
sizes.append(s)
names.append('METADATA')
types.append('-')
s = 0
for obj in self._rel().values():
s += getsizeof(obj)
sizes.append(s)
for name, obj in self._data().items():
names.append(name[1:] if name.startswith('_') else name)
types.append('.'.join((obj.__module__, obj.__class__.__name__)))
if isinstance(obj, pd.Series):
sizes.append(obj.memory_usage())
else:
sizes.append(obj.memory_usage().sum())
inf = pd.DataFrame.from_dict({'object': names, 'type': types, 'size': sizes})
inf.set_index('object', inplace=True)
return inf.sort_index()
def memory_usage(self, string=False):
"""
Get the memory usage estimate of the container.
Args:
string (bool): Human readable string (default false)
See Also:
:func:`~exa.core.container.Container.info`
"""
if string:
n = getsizeof(self)
return ' '.join((str(s) for s in convert_bytes(n)))
return self.info()['size']
def network(self, figsize=(14, 9), fig=True):
"""
Display information about the container's object relationships.
Nodes correspond to data objects. The size of the node corresponds
to the size of the table in memory. The color of the node corresponds
to its fundamental data type. Nodes are labeled by their container
name; class information is listed below. The color of the connections
correspond to the type of relationship; either an index of one table
corresponds to a column in another table or the two tables share an
index.
Args:
figsize (tuple): Tuple containing figure dimensions
fig (bool): Generate the figure (default true)
Returns:
graph: Network graph object containing data relationships
"""
conn_types = ['index-index', 'index-column']
conn_colors = mpl.sns.color_palette('viridis', len(conn_types))
conn = dict(zip(conn_types, conn_colors))
def get_node_type_color(obj):
"""Gets the color of a node based on the node's (sub)type."""
cols = mpl.sns.color_palette('viridis', len(conn_types))
for col in cols:
if isinstance(obj, (pd.DataFrame, pd.Series, pd.SparseSeries, pd.SparseDataFrame)):
typ = type(obj)
return '.'.join((typ.__module__, typ.__name__)), col
return 'other', 'gray'
def legend(items, name, loc, ax):
"""Legend creation helper function."""
proxies = []
descriptions = []
for label, color in items:
if label == 'column-index':
continue
if name == 'Data Type':
line = mpl.sns.mpl.lines.Line2D([], [], linestyle='none', color=color, marker='o')
else:
line = mpl.sns.mpl.lines.Line2D([], [], linestyle='-', color=color)
proxies.append(line)
descriptions.append(label)
lgnd = ax.legend(proxies, descriptions, title=name, loc=loc, frameon=True)
lgnd_frame = lgnd.get_frame()
lgnd_frame.set_facecolor('white')
lgnd_frame.set_edgecolor('black')
return lgnd, ax
info = self.info()
info = info[info['type'] != '-']
info['size'] *= 13000/info['size'].max()
info['size'] += 2000
node_size_dict = info['size'].to_dict() # Can pull all nodes from keys
node_class_name_dict = info['type'].to_dict()
node_type_dict = {} # Values are tuple of "underlying" type and color
node_conn_dict = {} # Values are tuple of connection type and color
items = self._data().items()
for k0, v0 in items:
n0 = k0[1:] if k0.startswith('_') else k0
node_type_dict[n0] = get_node_type_color(v0)
for k1, v1 in items:
if v0 is v1:
continue
n1 = k1[1:] if k1.startswith('_') else k1
for name in v0.index.names: # Check the index of data object 0 against the index
if name is None: # and columns of data object 1
continue
if name in v1.index.names:
contyp = 'index-index'
node_conn_dict[(n0, n1)] = (contyp, conn[contyp])
node_conn_dict[(n1, n0)] = (contyp, conn[contyp])
for col in v1.columns:
# Catches index "atom", column "atom1"; does not catch atom10
if name == col or (name == col[:-1] and col[-1].isdigit()):
contyp = 'index-column'
node_conn_dict[(n0, n1)] = (contyp, conn[contyp])
node_conn_dict[(n1, n0)] = ('column-index', conn[contyp])
g = nx.Graph()
g.add_nodes_from(node_size_dict.keys())
g.add_edges_from(node_conn_dict.keys())
node_sizes = [node_size_dict[node] for node in g.nodes()]
node_labels = {node: ' {}\n({})'.format(node, node_class_name_dict[node]) for node in g.nodes()}
node_colors = [node_type_dict[node][1] for node in g.nodes()]
edge_colors = [node_conn_dict[edge][1] for edge in g.edges()]
# Build the figure and legends
if fig:
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis('off')
pos = nx.spring_layout(g)
nx.draw_networkx_nodes(g, pos=pos, ax=ax, alpha=0.7, node_size=node_sizes,
node_color=node_colors)
nx.draw_networkx_labels(g, pos=pos, labels=node_labels, font_size=17,
font_weight='bold', ax=ax)
nx.draw_networkx_edges(g, pos=pos, edge_color=edge_colors, width=2, ax=ax)
l1, ax = legend(set(node_conn_dict.values()), 'Connection', (1, 0), ax)
_, ax = legend(set(node_type_dict.values()), 'Data Type', (1, 0.3), ax)
fig.gca().add_artist(l1)
        g.edge_types = {node: value[0] for node, value in node_conn_dict.items()}  # Attach connection information to the network graph
return g
def save(self, path=None, complevel=1, complib='zlib'):
"""
Save the container as an HDF5 archive.
Args:
path (str): Path where to save the container
"""
if path is None:
path = self.hexuid + '.hdf5'
elif os.path.isdir(path):
path += os.sep + self.hexuid + '.hdf5'
elif not (path.endswith('.hdf5') or path.endswith('.hdf')):
raise ValueError('File path must have a ".hdf5" or ".hdf" extension.')
with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:
store['kwargs'] = pd.Series()
store.get_storer('kwargs').attrs.metadata = self._rel()
fc = 0 # Field counter (see special handling of fields below)
for name, data in self._data().items():
if hasattr(data, '_revert_categories'):
data._revert_categories()
name = name[1:] if name.startswith('_') else name
if isinstance(data, Field): # Fields are handled separately
fname = 'FIELD{}_'.format(fc) + name + '/'
store[fname + 'data'] = pd.DataFrame(data)
for i, field in enumerate(data.field_values):
ffname = fname + 'values' + str(i)
if isinstance(field, pd.Series):
store[ffname] = pd.Series(field)
else:
store[ffname] = pd.DataFrame(field)
fc += 1
elif isinstance(data, Series):
s = pd.Series(data)
if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
s = s.astype('O')
store[name] = s
elif isinstance(data, DataFrame):
store[name] = pd.DataFrame(data)
elif isinstance(data, SparseSeries):
s = pd.SparseSeries(data)
if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
s = s.astype('O')
store[name] = s
elif isinstance(data, SparseDataFrame):
store[name] = pd.SparseDataFrame(data)
else:
if hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
data = data.astype('O')
else:
for col in data:
if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):
data[col] = data[col].astype('O')
store[name] = data
if hasattr(data, '_set_categories'):
data._set_categories()
def to_hdf(self, *args, **kwargs):
"""Alias of :func:`~exa.core.container.Container`."""
self.save(*args, **kwargs)
@classmethod
def load(cls, pkid_or_path=None):
"""
Load a container object from a persistent location or file path.
Args:
pkid_or_path: Integer pkid corresponding to the container table or file path
Returns:
container: The saved container object
"""
path = pkid_or_path
if isinstance(path, (int, np.int32, np.int64)):
raise NotImplementedError('Lookup via CMS not implemented.')
elif not os.path.isfile(path):
raise FileNotFoundError('File {} not found.'.format(path))
kwargs = {}
fields = defaultdict(dict)
with pd.HDFStore(path) as store:
for key in store.keys():
if 'kwargs' in key:
kwargs.update(store.get_storer(key).attrs.metadata)
elif "FIELD" in key:
name, dname = "_".join(key.split("_")[1:]).split("/")
dname = dname.replace('values', '')
fields[name][dname] = store[key]
else:
name = str(key[1:])
kwargs[name] = store[key]
for name, field_data in fields.items():
fps = field_data.pop('data')
kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in
sorted(map(int, field_data.keys()))])
return cls(**kwargs)
@classmethod
def from_hdf(cls, *args, **kwargs):
"""Alias for :func:`~exa.core.container.Container`."""
return cls.load(*args, **kwargs)
def _rel(self, copy=False):
"""
Get descriptive kwargs of the container (e.g. name, description, meta).
"""
rel = {}
for key, obj in vars(self).items():
if not isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)) and not key.startswith('_'):
if copy and 'id' not in key:
rel[key] = deepcopy(obj)
else:
rel[key] = obj
return rel
def _data(self, copy=False):
"""
Get data kwargs of the container (i.e. dataframe and series objects).
"""
data = {}
for key, obj in vars(self).items():
if isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)):
if copy:
data[key] = obj.copy(deep=True)
else:
data[key] = obj
return data
def __delitem__(self, key):
if key in vars(self):
del self.__dict__[key]
def __sizeof__(self):
"""Note that this function must return a Python integer."""
return int(self.info()['size'].sum())
def __getitem__(self, key):
if isinstance(key, str):
return getattr(self, key)
elif isinstance(key, (int, slice, list)) and self._cardinal is None:
return self.slice_naive(key)
elif isinstance(key, (int, slice, list)) and self._cardinal is not None:
return self.slice_cardinal(key)
raise KeyError()
def __init__(self, name=None, description=None, meta=None, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.name = name
self.description = description
self.meta = meta
class TypedMeta(type):
"""
This metaclass creates statically typed class attributes using the property
framework.
.. code-block:: Python
class TestMeta(TypedMeta):
attr1 = (int, float)
attr2 = DataFrame
class TestClass(metaclass=TestMeta):
def __init__(self, attr1, attr2):
self.attr1 = attr1
self.attr2 = attr2
The above code dynamically creates code that looks like the following:
.. code-block:: Python
class TestClass:
@property
def attr1(self):
return self._attr1
@attr1.setter
def attr1(self, obj):
if not isinstance(obj, (int, float)):
raise TypeError('attr1 must be int')
self._attr1 = obj
@attr1.deleter
def attr1(self):
del self._attr1
@property
def attr2(self):
return self._attr2
@attr2.setter
def attr2(self, obj):
if not isinstance(obj, DataFrame):
raise TypeError('attr2 must be DataFrame')
self._attr2 = obj
@attr2.deleter
def attr2(self):
del self._attr2
def __init__(self, attr1, attr2):
self.attr1 = attr1
self.attr2 = attr2
"""
@staticmethod
def create_property(name, ptype):
"""
        Creates a custom property with a getter that computes the value on
        demand (if a compute method is available) and a setter that raises a
        type error when given a value of the wrong type.
Note:
By default, the setter attempts to convert the object to the
correct type; a type error is raised if this fails.
"""
pname = '_' + name
def getter(self):
            # This will be where the data is stored (e.g. self._name)
# This is the default property "getter" for container data objects.
# If the property value is None, this function will check for a
# convenience method with the signature, self.compute_name() and call
# it prior to returning the property value.
if not hasattr(self, pname) and hasattr(self, '{}{}'.format(self._getter_prefix, pname)):
self['{}{}'.format(self._getter_prefix, pname)]()
if not hasattr(self, pname):
raise AttributeError('Please compute or set {} first.'.format(name))
return getattr(self, pname)
def setter(self, obj):
# This is the default property "setter" for container data objects.
# Prior to setting a property value, this function checks that the
# object's type is correct.
if not isinstance(obj, ptype):
try:
obj = ptype(obj)
except Exception:
raise TypeError('Must be able to convert object {0} to {1} (or must be of type {1})'.format(name, ptype))
setattr(self, pname, obj)
def deleter(self):
# Deletes the property's value.
del self[pname]
return property(getter, setter, deleter)
def __new__(mcs, name, bases, clsdict):
"""
Modification of the class definition occurs here; we iterate over all
statically typed attributes and attach their property (see
:func:`~exa.container.TypedMeta.create_property`) definition, returning
the new class definition.
"""
for k, v in vars(mcs).items():
if isinstance(v, type) and not k.startswith('_'):
clsdict[k] = mcs.create_property(k, v)
return super(TypedMeta, mcs).__new__(mcs, name, bases, clsdict)
|
|
from six.moves import builtins
from unittest import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from .mocking import mockOpen
from dark.filter import ReadSetFilter, TitleFilter
from dark.reads import Read
from dark.titles import TitleAlignment, TitleAlignments
class TitleFilterTest(TestCase):
"""
Tests for the L{dark.filter.TitleFilter} class.
"""
def testNoRestriction(self):
"""
Testing for acceptance against a title filter that has no
restrictions should return C{TitleFilter.DEFAULT_ACCEPT}.
"""
tf = TitleFilter()
self.assertEqual(TitleFilter.DEFAULT_ACCEPT, tf.accept('hey'))
def testPositiveRegex(self):
"""
Testing for acceptance against a title filter with a positive regex
must work.
"""
tf = TitleFilter(positiveRegex=r'x+\s')
self.assertEqual(TitleFilter.DEFAULT_ACCEPT, tf.accept('hey xxx you'))
self.assertEqual(TitleFilter.REJECT, tf.accept('hey xxyou'))
def testNegativeRegex(self):
"""
Testing for acceptance against a title filter with a negative regex
must work.
"""
tf = TitleFilter(negativeRegex=r'x+\s')
self.assertEqual(TitleFilter.REJECT, tf.accept('hey xxx you'))
self.assertEqual(TitleFilter.DEFAULT_ACCEPT, tf.accept('hey xxyou'))
def testPositiveRegexHasPrecedenceOverRepeatedTruncatedTitle(self):
"""
Testing for acceptance against a title filter with a positive regex
must have precedence over checking for truncated titles when the same
non-matching title (that will be truncated) is passed twice.
"""
tf = TitleFilter(positiveRegex=r'xxxxx', truncateAfter='virus')
self.assertEqual(TitleFilter.REJECT, tf.accept('spotty virus 1'))
self.assertEqual(TitleFilter.REJECT, tf.accept('spotty virus 1'))
def testNegativeRegexHasPrecedenceOverRepeatedTruncatedTitle(self):
"""
Testing for acceptance against a title filter with a negative regex
must have precedence over checking for truncated titles when the same
matching title (that will be truncated) is passed twice.
"""
tf = TitleFilter(negativeRegex=r'spotty', truncateAfter='virus')
self.assertEqual(TitleFilter.REJECT, tf.accept('spotty virus 1'))
self.assertEqual(TitleFilter.REJECT, tf.accept('spotty virus 1'))
def testFullWordTruncation(self):
"""
Testing for acceptance against a title filter with title truncation
in effect must work if the title contains the C{truncateAfter} string
        as a distinct word.
"""
tf = TitleFilter(truncateAfter=r'virus')
# Note that the truncation code will chop off the first part of the
# title (the title ID).
self.assertEqual(TitleFilter.DEFAULT_ACCEPT,
tf.accept('gi|400684|gb|AY421767.1| herpes virus 1'))
self.assertEqual(TitleFilter.REJECT,
tf.accept('gi|400684|gb|AY421767.1| herpes virus 2'))
def testPartialWordTruncation(self):
"""
Testing for acceptance against a title filter with title truncation
in effect must work if the title contains the C{truncateAfter} string
as a partial word.
"""
tf = TitleFilter(truncateAfter=r'virus')
# Note that the truncation code will chop off the first part of the
# title (the title ID).
self.assertEqual(TitleFilter.DEFAULT_ACCEPT,
tf.accept('gi|400684|gb|AY421767.1| rotavirus 1'))
self.assertEqual(TitleFilter.REJECT,
tf.accept('gi|400684|gb|AY421767.1| rotavirus 2'))
def testWordTruncationRepeat(self):
"""
Testing for acceptance against a title filter with title truncation
in effect must allow the exact same title twice, even if the title
is being truncated.
"""
tf = TitleFilter(truncateAfter=r'virus')
# Note that the truncation code will chop off the first part of the
# title (the title ID).
self.assertEqual(TitleFilter.DEFAULT_ACCEPT,
tf.accept('gi|400684|gb|AY421767.1| herpes virus 1'))
self.assertEqual(TitleFilter.DEFAULT_ACCEPT,
tf.accept('gi|400684|gb|AY421767.1| herpes virus 1'))
def testWhitelist(self):
"""
Testing for acceptance against a title filter with a whitelist
must work even when a title is ruled out for other violations.
"""
tf = TitleFilter(whitelist=['always ok'], negativeRegex='ok')
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('always ok'))
self.assertEqual(TitleFilter.REJECT, tf.accept('always ok not'))
def testBlacklist(self):
"""
Testing for acceptance against a title filter with a blacklist
must work.
"""
tf = TitleFilter(blacklist=['never ok'], positiveRegex='ok')
self.assertEqual(TitleFilter.REJECT, tf.accept('never ok'))
def testBlacklistFile(self):
"""
Testing for acceptance against a title filter with a blacklist file.
"""
data = '\n'.join(['id1', 'id2']) + '\n'
mockOpener = mockOpen(read_data=data)
with patch.object(builtins, 'open', mockOpener):
tf = TitleFilter(blacklistFile='black.txt')
self.assertEqual(TitleFilter.REJECT, tf.accept('id1'))
self.assertEqual(TitleFilter.REJECT, tf.accept('id2'))
self.assertEqual(TitleFilter.DEFAULT_ACCEPT, tf.accept('id3'))
def testBlacklistFileAndBlacklist(self):
"""
Testing for acceptance against a title filter with a blacklist file and
some specific other blacklist titles.
"""
data = '\n'.join(['id1', 'id2']) + '\n'
mockOpener = mockOpen(read_data=data)
with patch.object(builtins, 'open', mockOpener):
tf = TitleFilter(blacklistFile='black.txt', blacklist=set(['id3']))
self.assertEqual(TitleFilter.REJECT, tf.accept('id1'))
self.assertEqual(TitleFilter.REJECT, tf.accept('id2'))
self.assertEqual(TitleFilter.REJECT, tf.accept('id3'))
self.assertEqual(TitleFilter.DEFAULT_ACCEPT, tf.accept('id4'))
def testWhitelistTakesPrecedenceOverBlacklist(self):
"""
Testing for acceptance against a title filter with a whitelist
and a blacklist that contain the same title must work (the whitelist
takes precedence).
"""
tf = TitleFilter(whitelist=['always ok'], blacklist=['always ok'])
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('always ok'))
def testWhitelistOnly(self):
"""
Testing for acceptance against a title filter with a whitelist
and a negative regex that matches everything.
"""
tf = TitleFilter(whitelist=['always ok'], negativeRegex='.')
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('always ok'))
self.assertEqual(TitleFilter.REJECT, tf.accept('always not ok'))
self.assertEqual(TitleFilter.REJECT, tf.accept('rubbish'))
def testWhitelistFileOnly(self):
"""
Testing for acceptance against a title filter with a whitelist file
and a negative regex that matches everything.
"""
data = '\n'.join(['id1', 'id2']) + '\n'
mockOpener = mockOpen(read_data=data)
with patch.object(builtins, 'open', mockOpener):
tf = TitleFilter(whitelistFile='white.txt', negativeRegex='.')
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('id1'))
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('id2'))
self.assertEqual(TitleFilter.REJECT, tf.accept('id3'))
def testWhitelistFileAndWhitelistOnly(self):
"""
Testing for acceptance against a title filter with a whitelist file
and some specific whitelist titles, with a negative regex that matches
everything.
"""
data = '\n'.join(['id1', 'id2']) + '\n'
mockOpener = mockOpen(read_data=data)
with patch.object(builtins, 'open', mockOpener):
tf = TitleFilter(whitelistFile='white.txt', whitelist=set(['id3']),
negativeRegex='.')
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('id1'))
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('id2'))
self.assertEqual(TitleFilter.WHITELIST_ACCEPT, tf.accept('id3'))
self.assertEqual(TitleFilter.REJECT, tf.accept('id4'))
class ReadSetTest(TestCase):
"""
Tests for the L{dark.filter.ReadSetFilter} class.
"""
def makeTitleAlignments(self, *readIds):
"""
Create a TitleAlignments instance containing reads with the
        ids given by C{readIds}.
        @param readIds: A C{list} of integer ids for reads.
@return: A C{TitleAlignments} instance with reads with the given ids.
"""
titleAlignments = TitleAlignments('subject title', 55)
for readId in readIds:
titleAlignment = TitleAlignment(Read('id' + str(readId), 'A'), [])
titleAlignments.addAlignment(titleAlignment)
return titleAlignments
def testFirstUse(self):
"""
Testing for acceptance against a read set filter that has not been
used should return C{True}.
"""
titleAlignments = self.makeTitleAlignments()
rsf = ReadSetFilter(0.9)
self.assertTrue(rsf.accept('title1', titleAlignments))
def testDuplicateSingleRead(self):
"""
Testing for acceptance against a read set filter that has already
seen the exact set should return C{False} if the C{minNew} threshold
is non-zero.
"""
rsf = ReadSetFilter(0.9)
rsf.accept('title1', self.makeTitleAlignments(0))
self.assertFalse(rsf.accept('title2', self.makeTitleAlignments(0)))
def testDuplicateSingleReadZeroThreshold(self):
"""
Testing for acceptance against a read set filter that has already
seen the exact set should return C{True} if the C{minNew} threshold
is zero.
"""
rsf = ReadSetFilter(0.0)
rsf.accept('title1', self.makeTitleAlignments(0))
self.assertTrue(rsf.accept('title2', self.makeTitleAlignments(0)))
def testDifferentSet(self):
"""
Testing for acceptance against a read set filter that has seen a set
should return C{True} if the new set is totally different.
"""
rsf = ReadSetFilter(1.0)
rsf.accept('title1', self.makeTitleAlignments(0))
self.assertTrue(rsf.accept('title2', self.makeTitleAlignments(1)))
def testSufficientlyDifferent(self):
"""
Testing for acceptance against a read set filter that has seen several
sets should return C{True} if the new set is sufficiently different.
"""
rsf = ReadSetFilter(0.5)
rsf.accept('title1', self.makeTitleAlignments(0, 1, 2, 3, 4))
rsf.accept('title2', self.makeTitleAlignments(5, 6, 7, 8, 9))
self.assertTrue(rsf.accept('title3',
self.makeTitleAlignments(0, 1, 2, 5, 6, 7)))
def testInsufficientlyDifferent(self):
"""
Testing for acceptance against a read set filter that has seen several
sets should return C{False} if the new set is insufficiently different.
"""
rsf = ReadSetFilter(0.5)
rsf.accept('title1', self.makeTitleAlignments(0, 1, 2, 3, 4))
rsf.accept('title2', self.makeTitleAlignments(5, 6, 7, 8, 9))
self.assertFalse(rsf.accept('title3',
self.makeTitleAlignments(0, 1, 2, 11)))
def testThresholdRoundsUp(self):
"""
Testing for acceptance should round up the needed number of new reads.
"""
rsf = ReadSetFilter(0.5)
rsf.accept('title1', self.makeTitleAlignments(0, 1, 2, 3, 4))
# If we pass a read set of size three, two of the reads will need to be
# different.
self.assertFalse(rsf.accept('title2',
self.makeTitleAlignments(0, 1, 6)))
def testRepeatTitle(self):
"""
Testing for acceptance on a title that has been seen before (in an
accepted read set) must raise C{AssertionError}.
"""
rsf = ReadSetFilter(0.5)
rsf.accept('title1', self.makeTitleAlignments(0, 1, 2, 3, 4))
self.assertRaises(AssertionError, rsf.accept, 'title1',
self.makeTitleAlignments())
def testInvalidates(self):
"""
It must be possible to retrieve the list of titles that were
invalidated by an earlier title's read set.
"""
rsf = ReadSetFilter(0.5)
rsf.accept('title1', self.makeTitleAlignments(0))
rsf.accept('title2', self.makeTitleAlignments(0))
rsf.accept('title3', self.makeTitleAlignments(1))
rsf.accept('title4', self.makeTitleAlignments(0))
self.assertEqual(['title2', 'title4'], rsf.invalidates('title1'))
def testInvalidatesEmpty(self):
"""
The list of titles invalidated by an earlier title that didn't
invalidate anything must be empty.
"""
rsf = ReadSetFilter(0.5)
self.assertEqual([], rsf.invalidates('title1'))
class FakeCursor(object):
def __init__(self, results):
self._results = results
self._index = -1
def execute(self, p):
pass
def fetchone(self):
self._index += 1
return self._results[self._index]
def close(self):
pass
class FakeDbConnection(object):
"""
FakeDbConnection and FakeCursor fake results
for database calls.
"""
def __init__(self, results):
self._results = results
self.open = True
def cursor(self):
return FakeCursor(self._results)
def close(self):
self.open = False
|
|
from __future__ import print_function
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
import math
import time
import sys
import os
import inspect
import types
import re
class MsgpackMixin:
def to_msgpack(self, *args, **kwargs):
return self.__dict__ #msgpack.dump(self.to_dict(*args, **kwargs))
@classmethod
def from_msgpack(cls, encoded):
obj = cls()
obj.__dict__ = {k.decode('utf-8'): v for k, v in encoded.items()}
return obj
class AirSimImageType:
Scene = 0
DepthPlanner = 1
DepthPerspective = 2
DepthVis = 3
DisparityNormalized = 4
Segmentation = 5
SurfaceNormals = 6
class DrivetrainType:
MaxDegreeOfFreedom = 0
ForwardOnly = 1
class LandedState:
Landed = 0
Flying = 1
class Vector3r(MsgpackMixin):
x_val = np.float32(0)
y_val = np.float32(0)
z_val = np.float32(0)
def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0)):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
class Quaternionr(MsgpackMixin):
w_val = np.float32(0)
x_val = np.float32(0)
y_val = np.float32(0)
z_val = np.float32(0)
def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0), w_val = np.float32(1)):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
self.w_val = w_val
class Pose(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
def __init__(self, position_val, orientation_val):
self.position = position_val
self.orientation = orientation_val
class CollisionInfo(MsgpackMixin):
has_collided = False
normal = Vector3r()
impact_point = Vector3r()
position = Vector3r()
penetration_depth = np.float32(0)
time_stamp = np.float32(0)
object_name = ""
object_id = -1
class GeoPoint(MsgpackMixin):
latitude = 0.0
longitude = 0.0
altitude = 0.0
class YawMode(MsgpackMixin):
is_rate = True
yaw_or_rate = 0.0
def __init__(self, is_rate = True, yaw_or_rate = 0.0):
self.is_rate = is_rate
self.yaw_or_rate = yaw_or_rate
class ImageRequest(MsgpackMixin):
camera_id = np.uint8(0)
image_type = AirSimImageType.Scene
pixels_as_float = False
compress = False
def __init__(self, camera_id, image_type, pixels_as_float = False, compress = True):
self.camera_id = camera_id
self.image_type = image_type
self.pixels_as_float = pixels_as_float
self.compress = compress
class ImageResponse(MsgpackMixin):
image_data_uint8 = np.uint8(0)
image_data_float = np.float32(0)
camera_position = Vector3r()
camera_orientation = Quaternionr()
time_stamp = np.uint64(0)
message = ''
pixels_as_float = np.float32(0)
compress = True
width = 0
height = 0
image_type = AirSimImageType.Scene
class CarControls(MsgpackMixin):
throttle = np.float32(0)
steering = np.float32(0)
brake = np.float32(0)
handbrake = False
is_manual_gear = False
manual_gear = 0
gear_immediate = True
    def set_throttle(self, throttle_val, forward):
        if (forward):
            self.is_manual_gear = False
            self.manual_gear = 0
            self.throttle = abs(throttle_val)
        else:
            self.is_manual_gear = False
            self.manual_gear = -1
            self.throttle = -abs(throttle_val)
class CarState(MsgpackMixin):
speed = np.float32(0)
gear = 0
position = Vector3r()
velocity = Vector3r()
orientation = Quaternionr()
class AirSimClientBase:
def __init__(self, ip, port):
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = 3600)
def ping(self):
return self.client.call('ping')
def reset(self):
self.client.call('reset')
def confirmConnection(self):
print('Waiting for connection: ', end='')
home = self.getHomeGeoPoint()
while ((home.latitude == 0 and home.longitude == 0 and home.altitude == 0) or
math.isnan(home.latitude) or math.isnan(home.longitude) or math.isnan(home.altitude)):
time.sleep(1)
home = self.getHomeGeoPoint()
print('X', end='')
print('')
def getHomeGeoPoint(self):
return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint'))
# basic flight control
def enableApiControl(self, is_enabled):
return self.client.call('enableApiControl', is_enabled)
def isApiControlEnabled(self):
return self.client.call('isApiControlEnabled')
def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)
def simGetSegmentationObjectID(self, mesh_name):
return self.client.call('simGetSegmentationObjectID', mesh_name)
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the AirSimImageType members
def simGetImage(self, camera_id, image_type):
# because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately.
result = self.client.call('simGetImage', camera_id, image_type)
if (result == "" or result == "\0"):
return None
return result
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the AirSimImageType members
def simGetImages(self, requests):
responses_raw = self.client.call('simGetImages', requests)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def getCollisionInfo(self):
return CollisionInfo.from_msgpack(self.client.call('getCollisionInfo'))
@staticmethod
def stringToUint8Array(bstr):
return np.fromstring(bstr, np.uint8)
@staticmethod
def stringToFloatArray(bstr):
return np.fromstring(bstr, np.float32)
@staticmethod
def listTo2DFloatArray(flst, width, height):
return np.reshape(np.asarray(flst, np.float32), (height, width))
@staticmethod
def getPfmArray(response):
return AirSimClientBase.listTo2DFloatArray(response.image_data_float, response.width, response.height)
@staticmethod
def get_public_fields(obj):
return [attr for attr in dir(obj)
if not (attr.startswith("_")
or inspect.isbuiltin(attr)
or inspect.isfunction(attr)
or inspect.ismethod(attr))]
@staticmethod
def to_dict(obj):
return dict([attr, getattr(obj, attr)] for attr in AirSimClientBase.get_public_fields(obj))
@staticmethod
def to_str(obj):
return str(AirSimClientBase.to_dict(obj))
@staticmethod
def write_file(filename, bstr):
with open(filename, 'wb') as afile:
afile.write(bstr)
def simSetPose(self, pose, ignore_collison):
self.client.call('simSetPose', pose, ignore_collison)
def simGetPose(self):
return self.client.call('simGetPose')
# helper method for converting getOrientation to roll/pitch/yaw
    # https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
@staticmethod
def toEulerianAngle(q):
z = q.z_val
y = q.y_val
x = q.x_val
w = q.w_val
ysqr = y * y
# roll (x-axis rotation)
t0 = +2.0 * (w*x + y*z)
t1 = +1.0 - 2.0*(x*x + ysqr)
roll = math.atan2(t0, t1)
# pitch (y-axis rotation)
t2 = +2.0 * (w*y - z*x)
if (t2 > 1.0):
            t2 = 1.0
if (t2 < -1.0):
t2 = -1.0
pitch = math.asin(t2)
# yaw (z-axis rotation)
t3 = +2.0 * (w*z + x*y)
t4 = +1.0 - 2.0 * (ysqr + z*z)
yaw = math.atan2(t3, t4)
return (pitch, roll, yaw)
@staticmethod
def toQuaternion(pitch, roll, yaw):
t0 = math.cos(yaw * 0.5)
t1 = math.sin(yaw * 0.5)
t2 = math.cos(roll * 0.5)
t3 = math.sin(roll * 0.5)
t4 = math.cos(pitch * 0.5)
t5 = math.sin(pitch * 0.5)
q = Quaternionr()
q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w
q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x
q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y
q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z
return q
@staticmethod
def wait_key(message = ''):
''' Wait for a key press on the console and return it. '''
if message != '':
print (message)
result = None
if os.name == 'nt':
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
@staticmethod
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
# DEY: I don't know why this was there.
#data = np.flipud(data)
file.close()
return data, scale
@staticmethod
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(temp_str.encode('utf-8'))
endian = image.dtype.byteorder
        if endian == '<' or (endian == '=' and sys.byteorder == 'little'):
scale = -scale
temp_str = '%f\n' % scale
file.write(temp_str.encode('utf-8'))
image.tofile(file)
@staticmethod
def write_png(filename, image):
""" image must be numpy array H X W X channels
"""
import zlib, struct
buf = image.flatten().tobytes()
width = image.shape[1]
height = image.shape[0]
# reverse the vertical line order and add null bytes at the start
width_byte_4 = width * 4
raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
png_bytes = b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
AirSimClientBase.write_file(filename, png_bytes)
# ----------------------------------- Multirotor APIs ---------------------------------------------
class MultirotorClient(AirSimClientBase, object):
def __init__(self, ip = ""):
if (ip == ""):
ip = "127.0.0.1"
super(MultirotorClient, self).__init__(ip, 41451)
def armDisarm(self, arm):
return self.client.call('armDisarm', arm)
def takeoff(self, max_wait_seconds = 15):
return self.client.call('takeoff', max_wait_seconds)
def land(self, max_wait_seconds = 60):
return self.client.call('land', max_wait_seconds)
def goHome(self):
return self.client.call('goHome')
def hover(self):
return self.client.call('hover')
# query vehicle state
def getPosition(self):
return Vector3r.from_msgpack(self.client.call('getPosition'))
def getVelocity(self):
return Vector3r.from_msgpack(self.client.call('getVelocity'))
def getOrientation(self):
return Quaternionr.from_msgpack(self.client.call('getOrientation'))
def getLandedState(self):
return self.client.call('getLandedState')
def getGpsLocation(self):
return GeoPoint.from_msgpack(self.client.call('getGpsLocation'))
def getPitchRollYaw(self):
return self.toEulerianAngle(self.getOrientation())
#def getRCData(self):
# return self.client.call('getRCData')
def timestampNow(self):
return self.client.call('timestampNow')
def isApiControlEnabled(self):
return self.client.call('isApiControlEnabled')
def isSimulationMode(self):
return self.client.call('isSimulationMode')
def getServerDebugInfo(self):
return self.client.call('getServerDebugInfo')
# APIs for control
def moveByAngle(self, pitch, roll, z, yaw, duration):
return self.client.call('moveByAngle', pitch, roll, z, yaw, duration)
def moveByVelocity(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode)
def moveByVelocityZ(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode)
def moveOnPath(self, path, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveOnPath', path, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)
def moveToZ(self, z, velocity, max_wait_seconds = 60, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveToZ', z, velocity, max_wait_seconds, yaw_mode, lookahead, adaptive_lookahead)
def moveToPosition(self, x, y, z, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveToPosition', x, y, z, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)
def moveByManual(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode)
def rotateToYaw(self, yaw, max_wait_seconds = 60, margin = 5):
return self.client.call('rotateToYaw', yaw, max_wait_seconds, margin)
def rotateByYawRate(self, yaw_rate, duration):
return self.client.call('rotateByYawRate', yaw_rate, duration)
# ----------------------------------- Car APIs ---------------------------------------------
class CarClient(AirSimClientBase, object):
def __init__(self, ip = ""):
if (ip == ""):
ip = "127.0.0.1"
super(CarClient, self).__init__(ip, 42451)
def setCarControls(self, controls):
self.client.call('setCarControls', controls)
def getCarState(self):
state_raw = self.client.call('getCarState')
return CarState.from_msgpack(state_raw)
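# A minimal usage sketch (assumes an AirSim simulator listening on the default
# 127.0.0.1:41451 and that API control is already enabled for the vehicle;
# the coordinates and speed are illustrative):
def _example_simple_flight():
    client = MultirotorClient()
    client.armDisarm(True)
    client.takeoff()
    client.moveToPosition(10, 0, -5, velocity=3)  # NED frame: negative z is up
    client.land()
    client.armDisarm(False)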
|
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the SSD Model.
Defines model_fn of SSD for TF Estimator. The model_fn includes SSD
model architecture, loss function, learning rate schedule, and evaluation
procedure.
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools as it
import math
import numpy as np
import tensorflow as tf
import ssd_architecture
import ssd_constants
from tensorflow.contrib.tpu.python.tpu import bfloat16
BBOX_XFORM_CLIP = np.log(1000. / 16.)
class DefaultBoxes(object):
"""Default bounding boxes for 1200x1200 5 layer SSD.
Default bounding boxes generation follows the order of (W, H, anchor_sizes).
Therefore, the tensor converted from DefaultBoxes has a shape of
[anchor_sizes, H, W, 4]. The last dimension is the box coordinates; 'ltrb'
is [ymin, xmin, ymax, xmax] while 'xywh' is [cy, cx, h, w].
"""
def __init__(self):
steps = [
int(ssd_constants.IMAGE_SIZE / fs) for fs in ssd_constants.FEATURE_SIZES
]
fk = ssd_constants.IMAGE_SIZE / np.array(steps)
self.default_boxes = []
# Scale by image size.
scales = [
int(s * ssd_constants.IMAGE_SIZE / 300) for s in ssd_constants.SCALES
]
# For each feature map size, derive the default box shapes from this scale and the next.
for idx, feature_size in enumerate(ssd_constants.FEATURE_SIZES):
sk1 = scales[idx] / ssd_constants.IMAGE_SIZE
sk2 = scales[idx + 1] / ssd_constants.IMAGE_SIZE
sk3 = math.sqrt(sk1 * sk2)
all_sizes = [(sk1, sk1), (sk3, sk3)]
for alpha in ssd_constants.ASPECT_RATIOS[idx]:
w, h = sk1 * math.sqrt(alpha), sk1 / math.sqrt(alpha)
all_sizes.append((w, h))
all_sizes.append((h, w))
assert len(all_sizes) == ssd_constants.NUM_DEFAULTS[idx]
for w, h in all_sizes:
for i, j in it.product(range(feature_size), repeat=2):
cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx]
box = tuple(np.clip(k, 0, 1) for k in (cy, cx, h, w))
self.default_boxes.append(box)
assert len(self.default_boxes) == ssd_constants.NUM_SSD_BOXES
def to_ltrb(cy, cx, h, w):
return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2
# For IoU calculation
self.default_boxes_ltrb = tuple(to_ltrb(*i) for i in self.default_boxes)
def __call__(self, order='ltrb'):
if order == 'ltrb':
return self.default_boxes_ltrb
if order == 'xywh':
return self.default_boxes
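# A quick shape sketch (assumes `ssd_constants` is importable): either order
# yields NUM_SSD_BOXES boxes of 4 floats, so the anchors tensor consumed by
# decode_boxes below is [ssd_constants.NUM_SSD_BOXES, 4].
def _example_default_boxes_shape():
  anchors = tf.convert_to_tensor(DefaultBoxes()('ltrb'))
  return anchors.shape  # (ssd_constants.NUM_SSD_BOXES, 4)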
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4, representing the
encoded box offsets in dy, dx, dh, dw order.
anchors: a tensor whose shape is the same as `encoded_boxes`, representing
the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
decoded_boxes: a tensor whose shape is the same as `encoded_boxes`,
representing the decoded boxes in ymin, xmin, ymax, xmax order.
"""
with tf.name_scope('decode_box'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.exp(dh) * anchor_h
decoded_boxes_w = tf.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_yc + 0.5 * decoded_boxes_h
decoded_boxes_xmax = decoded_boxes_xc + 0.5 * decoded_boxes_w
decoded_boxes = tf.concat([
decoded_boxes_ymin, decoded_boxes_xmin, decoded_boxes_ymax,
decoded_boxes_xmax
],
axis=-1)
return decoded_boxes
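# A sanity sketch of the decoding math above: an all-zero encoding decodes
# back to the anchor itself, since the center offsets are 0 and exp(0) == 1
# leaves the anchor height/width unchanged (the anchor values are illustrative).
def _example_decode_identity():
  anchors = tf.constant([[0.2, 0.2, 0.6, 0.4]])
  return decode_boxes(tf.zeros_like(anchors), anchors)  # evaluates to anchors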
def select_top_k_scores(scores_in, pre_nms_num_detections=5000):
"""Select top_k scores and indices for each class.
Args:
scores_in: a Tensor with shape [batch_size, num_classes, N], which stacks
class logit outputs on all feature levels. The N is the number of total
anchors on all levels. The num_classes is the number of classes predicted
by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: Tensors with shape [batch_size, pre_nms_num_detections,
num_classes].
"""
_, num_class, num_anchors = scores_in.get_shape().as_list()
scores = tf.reshape(scores_in, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[-1, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[-1, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(
top_k_indices, [0, 2, 1])
def _filter_scores(scores, boxes, min_score=ssd_constants.MIN_SCORE):
mask = scores > min_score
scores = tf.where(mask, scores, tf.zeros_like(scores))
boxes = tf.where(
tf.tile(tf.expand_dims(mask, 2), (1, 1, 4)), boxes, tf.zeros_like(boxes))
return scores, boxes
def non_max_suppression(scores_in,
boxes_in,
top_k_indices,
source_id,
raw_shape,
num_detections=ssd_constants.MAX_NUM_EVAL_BOXES):
"""Implement Non-maximum suppression.
Args:
scores_in: a Tensor with shape [batch_size,
ssd_constants.MAX_NUM_EVAL_BOXES, num_classes]. The top
ssd_constants.MAX_NUM_EVAL_BOXES box scores for each class.
boxes_in: a Tensor with shape [batch_size, N, 4], which stacks box
regression outputs on all feature levels. The N is the number of total
anchors on all levels.
top_k_indices: a Tensor with shape [batch_size,
ssd_constants.MAX_NUM_EVAL_BOXES, num_classes]. The indices for these top
boxes for each class.
source_id: a Tensor with shape [batch_size]
raw_shape: a Tensor with shape [batch_size, 3]
num_detections: maximum output length.
Returns:
A tensor of size [batch_size, num_detections, 7], stacking for each
detection the source id, the four box coordinates, the score and the
class label after NMS.
"""
_, _, num_classes = scores_in.get_shape().as_list()
source_id = tf.to_float(
tf.tile(tf.expand_dims(source_id, 1), [1, num_detections]))
raw_shape = tf.to_float(
tf.tile(tf.expand_dims(raw_shape, 1), [1, num_detections, 1]))
list_of_all_boxes = []
list_of_all_scores = []
list_of_all_classes = []
# Skip background class.
for class_i in range(1, num_classes, 1):
boxes = tf.batch_gather(boxes_in, top_k_indices[:, :, class_i])
class_i_scores = scores_in[:, :, class_i]
class_i_scores, boxes = _filter_scores(class_i_scores, boxes)
(class_i_post_scores,
class_i_post_boxes) = ssd_architecture.non_max_suppression_padded(
scores=tf.to_float(class_i_scores),
boxes=tf.to_float(boxes),
max_output_size=num_detections,
iou_threshold=ssd_constants.OVERLAP_CRITERIA)
class_i_classes = tf.fill(tf.shape(class_i_post_scores), class_i)
list_of_all_boxes.append(class_i_post_boxes)
list_of_all_scores.append(class_i_post_scores)
list_of_all_classes.append(class_i_classes)
post_nms_boxes = tf.concat(list_of_all_boxes, axis=1)
post_nms_scores = tf.concat(list_of_all_scores, axis=1)
post_nms_classes = tf.concat(list_of_all_classes, axis=1)
# sort all results.
post_nms_scores, sorted_indices = tf.nn.top_k(
tf.to_float(post_nms_scores), k=num_detections, sorted=True)
post_nms_boxes = tf.batch_gather(post_nms_boxes, sorted_indices)
post_nms_classes = tf.batch_gather(post_nms_classes, sorted_indices)
detections_result = tf.stack([
source_id,
post_nms_boxes[:, :, 0],
post_nms_boxes[:, :, 1],
post_nms_boxes[:, :, 2],
post_nms_boxes[:, :, 3],
post_nms_scores,
tf.to_float(post_nms_classes),
],
axis=2)
return detections_result
def concat_outputs(cls_outputs, box_outputs):
"""Concatenate predictions into a single tensor.
This function takes the dicts of class and box prediction tensors and
concatenates them into a single tensor for comparison with the ground truth
boxes and class labels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width,
num_anchors * num_classses].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in
[batch_size, height, width, num_anchors * 4].
Returns:
concatenated cls_outputs with shape [batch_size, num_classes, N] and
concatenated box_outputs with shape [batch_size, 4, N], where N is the
total number of anchors.
"""
assert set(cls_outputs.keys()) == set(box_outputs.keys())
# This sort matters. The labels assume a certain order based on
# ssd_constants.FEATURE_SIZES, and this sort matches that convention.
keys = sorted(cls_outputs.keys())
flat_cls = []
flat_box = []
for i, k in enumerate(keys):
# TODO(taylorrobie): confirm that this reshape, transpose,
# reshape is correct.
scale = ssd_constants.FEATURE_SIZES[i]
last_dim_size = scale * scale * ssd_constants.NUM_DEFAULTS[i]
split_shape = (ssd_constants.NUM_CLASSES, ssd_constants.NUM_DEFAULTS[i])
assert cls_outputs[k].shape[3] == split_shape[0] * split_shape[1]
flat_cls.append(
tf.reshape(
tf.transpose(cls_outputs[k], [0, 3, 1, 2]),
[-1, ssd_constants.NUM_CLASSES, last_dim_size]))
split_shape = (ssd_constants.NUM_DEFAULTS[i], 4)
assert box_outputs[k].shape[3] == split_shape[0] * split_shape[1]
flat_box.append(
tf.reshape(
tf.transpose(box_outputs[k], [0, 3, 1, 2]), [-1, 4, last_dim_size]))
return tf.concat(flat_cls, axis=2), tf.concat(flat_box, axis=2)
def _model_fn(images, source_id, raw_shape, params, model):
"""Model defination for the SSD model based on ResNet-50.
Args:
images: the input image tensor with shape [batch_size, height, width, 3].
The height and width are fixed and equal.
source_id: a Tensor with shape [batch_size]
raw_shape: a Tensor with shape [batch_size, 3]
params: the dictionary defines hyperparameters of model. The default
settings are in default_hparams function in this file.
model: the SSD model outputs class logits and box regression outputs.
Returns:
spec: the EstimatorSpec or TPUEstimatorSpec to run training, evaluation,
or prediction.
"""
features = images
def _model_outputs():
return model(features, params, is_training_bn=False)
if params['use_bfloat16']:
with bfloat16.bfloat16_scope():
cls_outputs, box_outputs = _model_outputs()
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
else:
cls_outputs, box_outputs = _model_outputs()
levels = cls_outputs.keys()
flattened_cls, flattened_box = concat_outputs(cls_outputs, box_outputs)
y_min, x_min, y_max, x_max = tf.split(flattened_box, 4, axis=1)
flattened_box = tf.concat([x_min, y_min, x_max, y_max], axis=1)
# [batch_size, 4, N] to [batch_size, N, 4]
flattened_box = tf.transpose(flattened_box, [0, 2, 1])
anchors = tf.convert_to_tensor(DefaultBoxes()('ltrb'))
decoded_boxes = decode_boxes(
encoded_boxes=flattened_box,
anchors=anchors,
weights=ssd_constants.BOX_CODER_SCALES)
pred_scores = tf.nn.softmax(flattened_cls, axis=1)
pred_scores, indices = select_top_k_scores(pred_scores,
ssd_constants.MAX_NUM_EVAL_BOXES)
detections = non_max_suppression(
scores_in=pred_scores,
boxes_in=decoded_boxes,
top_k_indices=indices,
source_id=source_id,
raw_shape=raw_shape)
return detections
def ssd_model_fn(images, source_id, raw_shape, params):
"""SSD model."""
return _model_fn(
images, source_id, raw_shape, params, model=ssd_architecture.ssd)
def default_hparams():
return tf.contrib.training.HParams(
use_bfloat16=True,
transpose_input=True,
nms_on_tpu=True,
conv0_space_to_depth=False,
use_cocoeval_cc=True,
use_spatial_partitioning=False,
)
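# A minimal usage sketch: the hparams above are a plain
# tf.contrib.training.HParams object, so individual settings can be
# overridden after construction.
def _example_hparams_override():
  hparams = default_hparams()
  hparams.set_hparam('use_bfloat16', False)
  return hparams.use_bfloat16  # False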
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Human and extractive baseline evaluation.
human_and_extractive \
--data_dir=$ROCSTORIES_DATA \
--eval_subset=test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from rouge import rouge_scorer
from rouge import scoring
from summae import p2s_eval
from summae import util
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '.', 'Data directory.')
flags.DEFINE_string('eval_subset', 'test',
'which subset (valid/test) to eval/decode.')
flags.DEFINE_string('output_dir', '/tmp/12342',
'local directory to save extractive oracle')
flags.DEFINE_string('vocab_file', '',
'Subword vocab file.') # for detok first sentence
my_rouge_scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'],
use_stemmer=True)
def get_extracts(s):
# get 5 sentences as the extractive baselines
sents = s.feature_lists.feature_list['untokenized_sentences'].feature
assert len(sents) == 5
return tuple([sents[i].bytes_list.value[0] for i in range(5)])
def human_ave(summ_list):
"""Average pairwise rouge between two human summaries."""
agg = scoring.BootstrapAggregator()
for s1_id, s1 in enumerate(summ_list):
for s2_id, s2 in enumerate(summ_list):
if s1_id >= s2_id: # only compute for s1_id < s2_id
continue
s2_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(s2), p2s_eval.TRUNC_LEN)
s1_s2_trunc_score = my_rouge_scorer.score(s1, s2_trunc)
agg.add_scores(s1_s2_trunc_score)
agg_ave = agg.aggregate()
score_ave = {
rouge_type: agg_ave[rouge_type].mid for rouge_type in agg_ave # mid=mean
}
nwords_ave = np.mean([p2s_eval.count_words(s) for s in summ_list])
return (score_ave, nwords_ave)
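# A minimal sketch of the scoring plumbing used throughout this file (the
# summary strings are made up; relies on the google-research `rouge` package
# imported above):
def _example_aggregate():
  agg = scoring.BootstrapAggregator()
  agg.add_scores(my_rouge_scorer.score('the cat sat', 'a cat sat'))
  agg.add_scores(my_rouge_scorer.score('dogs bark', 'the dog barked'))
  agg_ave = agg.aggregate()  # rouge type -> AggregateScore(low, mid, high)
  return agg_ave['rouge1'].mid.recall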
def human_max(summ_list):
"""Maximum pairwise rouge between any two human summaries."""
score_max = None
rouge_1r_trunc_max = 0
for s1_id, s1 in enumerate(summ_list):
for s2_id, s2 in enumerate(summ_list):
if s1_id >= s2_id:
continue
s2_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(s2), p2s_eval.TRUNC_LEN)
s1_s2_trunc_score = my_rouge_scorer.score(s1, s2_trunc)
if s1_s2_trunc_score['rouge1'].recall >= rouge_1r_trunc_max:
score_max = s1_s2_trunc_score
rouge_1r_trunc_max = s1_s2_trunc_score['rouge1'].recall
nwords_max = np.max([p2s_eval.count_words(s) for s in summ_list])
return (score_max, nwords_max)
def extract_ave(e, summ_list):
"""Average rouge between ith sentence and human summaries."""
agg = scoring.BootstrapAggregator()
e_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
for s in summ_list:
s_e_trunc_score = my_rouge_scorer.score(s, e_trunc)
agg.add_scores(s_e_trunc_score)
agg_ave = agg.aggregate()
score_ave = {
rouge_type: agg_ave[rouge_type].mid for rouge_type in agg_ave # mid=mean
}
nwords_e = p2s_eval.count_words(e)
return (score_ave, nwords_e)
def extract_oracle(extract_list, summ_list):
"""Choose sentence with maximum average rouge."""
score_accum = []
for e in extract_list:
e_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
accum_rouge_1r_trunc = 0
for s in summ_list:
s_e_trunc_score = my_rouge_scorer.score(s, e_trunc)
# for computing accumulative rouge
accum_rouge_1r_trunc += s_e_trunc_score['rouge1'].recall
score_accum.append(accum_rouge_1r_trunc)
e_id_o = np.argmax(score_accum)
e_o = extract_list[e_id_o]
# Compute average rouge for the oracle sentence
agg = scoring.BootstrapAggregator()
e_o_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e_o),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
for s in summ_list:
e_o_trunc_score = my_rouge_scorer.score(s, e_o_trunc)
agg.add_scores(e_o_trunc_score)
agg_o = agg.aggregate()
score_o = {
rouge_type: agg_o[rouge_type].mid for rouge_type in agg_o # mid=mean
}
nwords_o = p2s_eval.count_words(e_o)
return (score_o, nwords_o, e_o)
def print_agg_score(label, agg, nwords):
print(
'%s: \n\t rouge-1r-trunc20=%.3f \t rouge-Lr-trunc20=%.3f \t nwords=%.1f' %
(label, agg.aggregate()['rouge1'].mid.recall,
agg.aggregate()['rougeL'].mid.recall, np.mean(nwords)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.io.gfile.mkdir(FLAGS.output_dir)
data_file = os.path.join(
FLAGS.data_dir,
'rocstories_gt.' + six.ensure_str(FLAGS.eval_subset) + '.tfrecord')
seq_ex_list = util.get_seq_exs(data_file)
print('Input data %s' % data_file)
# Human summary baselines.
# We have 3 human summaries for each example, and
# 2 human performance variants:
# 1. 'a': average pairwise rouge between two summaries
# 2. 'm': maximum pairwise rouge between any two summaries
agg_human = {}
nwords_human = {}
for h in ['a', 'm']:
agg_human[h] = scoring.BootstrapAggregator()
nwords_human[h] = []
# Extractive baselines
# 1. '0','1','2','3','4': rouge between the ith sentence and human summaries
# 2. 'o': for each example, choose sentence with maximum average rouge
agg_extract = {}
nwords_extract = {}
for e in [str(x) for x in list(range(5))] + ['o']:
agg_extract[e] = scoring.BootstrapAggregator()
nwords_extract[e] = []
# human performance
sent2oracle = {}
for ex in seq_ex_list:
summ_list = p2s_eval.get_summaries(ex)
summ_list = [x.decode('utf-8') for x in summ_list]
# human eval
score, nwords = human_ave(summ_list)
agg_human['a'].add_scores(score)
nwords_human['a'].append(nwords)
score, nwords = human_max(summ_list)
agg_human['m'].add_scores(score)
nwords_human['m'].append(nwords)
# extractive eval
extract_list = get_extracts(ex)
extract_list = [x.decode('utf-8') for x in extract_list]
for e_id, e in enumerate(extract_list):
score, nwords = extract_ave(e, summ_list)
agg_extract[str(e_id)].add_scores(score)
nwords_extract[str(e_id)].append(nwords)
score, nwords, e_o = extract_oracle(extract_list, summ_list)
agg_extract['o'].add_scores(score)
nwords_extract['o'].append(nwords)
# save story and oracle sentence for future use
first = p2s_eval.get_first_sentence(ex)
if first in sent2oracle:
logging.fatal('duplicate first sentence: %s', str(first))
sent2oracle[first] = (' '.join(extract_list), e_o) # (story, oracle)
# write each example and the corresponding oracle to disk
tk, _ = util.get_tokenizer_with_special(FLAGS.vocab_file, [])
def detok(s):
return tk.decode(util.strip_after_eos(s))
keys_sorted = sorted(sent2oracle.keys(), key=detok)
out_file = os.path.join(
FLAGS.output_dir, 'rocstories_gt.' + six.ensure_str(FLAGS.eval_subset) +
'.firstsent2oracle.txt')
with tf.gfile.Open(out_file, 'w') as f:
for k in keys_sorted:
f.write('%s\n' % (sent2oracle[k][1]))
# print out rouge scores for human performance
print_agg_score('human average', agg_human['a'], nwords_human['a'])
print_agg_score('human max', agg_human['m'], nwords_human['m'])
for e_id in range(5):
print_agg_score('extractive baseline{}'.format(e_id),
agg_extract[str(e_id)], nwords_extract[str(e_id)])
print_agg_score('extractive oracle', agg_extract['o'], nwords_extract['o'])
if __name__ == '__main__':
app.run(main)
|
|
"""
Packet handling functionality for Empire.
Defines packet types, builds tasking packets and parses result packets.
Packet format:
RC4s = RC4 encrypted with the shared staging key
HMACs = SHA1 HMAC using the shared staging key
AESc = AES encrypted using the client's session key
HMACc = first 10 bytes of a SHA256 HMAC using the client's session key
Routing Packet:
+---------+-------------------+--------------------------+
| RC4 IV | RC4s(RoutingData) | AESc(client packet data) | ...
+---------+-------------------+--------------------------+
| 4 | 16 | RC4 length |
+---------+-------------------+--------------------------+
RC4s(RoutingData):
+-----------+------+------+-------+--------+
| SessionID | Lang | Meta | Extra | Length |
+-----------+------+------+-------+--------+
| 8 | 1 | 1 | 2 | 4 |
+-----------+------+------+-------+--------+
SessionID = the sessionID that the packet is bound for
Lang = indicates the language used
Meta = indicates staging req/tasking req/result post/etc.
Extra = reserved for future expansion
AESc(client data)
+--------+-----------------+-------+
| AES IV | Enc Packet Data | HMACc |
+--------+-----------------+-------+
| 16 | % 16 bytes | 10 |
+--------+-----------------+-------+
Client data decrypted:
+------+--------------------+----------+---------+--------+-----------+
| Type | total # of packets | packet # | task ID | Length | task data |
+------+--------------------+----------+---------+--------+-----------+
|  2   |         2          |    2     |    2    |   4    | <Length>  |
+------+--------------------+----------+---------+--------+-----------+
type = packet type
total # of packets = number of total packets in the transmission
Packet # = where the packet fits in the transmission
Task ID = links the tasking to results for deconflict on server side
Client *_SAVE packets have the sub format:
[15 chars] - save prefix
[5 chars] - extension
[X...] - tasking data
"""
import struct
import base64
import os
import hashlib
import hmac
from Crypto import Random
from pydispatch import dispatcher
# Empire imports
import encryption
# 0 -> error
# 1-99 -> standard functionality
# 100-199 -> dynamic functionality
# 200-299 -> SMB functionality
PACKET_NAMES = {
"ERROR" : 0,
"TASK_SYSINFO" : 1,
"TASK_EXIT" : 2,
"TASK_SET_DELAY" : 10,
"TASK_GET_DELAY" : 12,
"TASK_SET_SERVERS" : 13,
"TASK_ADD_SERVERS" : 14,
"TASK_UPDATE_PROFILE" : 20,
"TASK_SET_KILLDATE" : 30,
"TASK_GET_KILLDATE" : 31,
"TASK_SET_WORKING_HOURS" : 32,
"TASK_GET_WORKING_HOURS" : 33,
"TASK_SHELL" : 40,
"TASK_DOWNLOAD" : 41,
"TASK_UPLOAD" : 42,
"TASK_GETJOBS" : 50,
"TASK_STOPJOB" : 51,
"TASK_GETDOWNLOADS" : 52,
"TASK_STOPDOWNLOAD" : 53,
"TASK_CMD_WAIT" : 100,
"TASK_CMD_WAIT_SAVE" : 101,
"TASK_CMD_JOB" : 110,
"TASK_CMD_JOB_SAVE" : 111,
"TASK_SCRIPT_IMPORT" : 120,
"TASK_SCRIPT_COMMAND" : 121,
"TASK_IMPORT_MODULE" : 122,
"TASK_VIEW_MODULE" : 123,
"TASK_REMOVE_MODULE" : 124,
"TASK_SWITCH_LISTENER" : 130
}
# build a lookup table for IDS
PACKET_IDS = {}
for name, ID in PACKET_NAMES.items(): PACKET_IDS[ID] = name
LANGUAGE = {
'NONE' : 0,
'POWERSHELL' : 1,
'PYTHON' : 2
}
LANGUAGE_IDS = {}
for name, ID in LANGUAGE.items(): LANGUAGE_IDS[ID] = name
META = {
'NONE' : 0,
'STAGE0' : 1,
'STAGE1' : 2,
'STAGE2' : 3,
'TASKING_REQUEST' : 4,
'RESULT_POST' : 5,
'SERVER_RESPONSE' : 6
}
META_IDS = {}
for name, ID in META.items(): META_IDS[ID] = name
ADDITIONAL = {}
ADDITIONAL_IDS = {}
for name, ID in ADDITIONAL.items(): ADDITIONAL_IDS[ID] = name
def build_task_packet(taskName, data, resultID):
"""
Build a task packet for an agent.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - task data
+------+--------------------+----------+---------+--------+-----------+
| Type | total # of packets | packet # | task ID | Length | task data |
+------+--------------------+----------+---------+--------+-----------+
|  2   |         2          |    2     |    2    |   4    | <Length>  |
+------+--------------------+----------+---------+--------+-----------+
"""
taskType = struct.pack('=H', PACKET_NAMES[taskName])
totalPacket = struct.pack('=H', 1)
packetNum = struct.pack('=H', 1)
resultID = struct.pack('=H', resultID)
length = struct.pack('=L',len(data))
return taskType + totalPacket + packetNum + resultID + length + data.decode('utf-8').encode('utf-8', 'ignore')
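# A quick sketch of the layout above: the header is 12 bytes (four unsigned
# shorts plus one unsigned long), followed by the raw task data. The task
# command and result ID here are illustrative.
def _example_build_shell_task():
    packet = build_task_packet('TASK_SHELL', 'whoami', 41)
    header, task_data = packet[:12], packet[12:]
    return struct.unpack('=HHHHL', header), task_data
    # -> ((40, 1, 1, 41, 6), 'whoami')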
def parse_result_packet(packet, offset=0):
"""
Parse a result packet.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
+------+--------------------+----------+---------+--------+-----------+
| Type | total # of packets | packet # | task ID | Length | task data |
+------+--------------------+----------+---------+--------+-----------+
|  2   |         2          |    2     |    2    |   4    | <Length>  |
+------+--------------------+----------+---------+--------+-----------+
Returns a tuple of (responseName, totalPackets, packetNum, taskID, length, data, remainingData).
"""
try:
responseID = struct.unpack('=H', packet[0+offset:2+offset])[0]
totalPacket = struct.unpack('=H', packet[2+offset:4+offset])[0]
packetNum = struct.unpack('=H', packet[4+offset:6+offset])[0]
taskID = struct.unpack('=H', packet[6+offset:8+offset])[0]
length = struct.unpack('=L', packet[8+offset:12+offset])[0]
if length != 0:
if length % 4:
# pad the base64 data out to a multiple of 4 before decoding
datapart = packet[12+offset:12+offset+length]
datapart += '=' * (4 - length % 4)
data = base64.b64decode(datapart)
else:
data = base64.b64decode(packet[12+offset:12+offset+length])
else:
data = None
remainingData = packet[12+offset+length:]
return (PACKET_IDS[responseID], totalPacket, packetNum, taskID, length, data, remainingData)
except Exception as e:
dispatcher.send("[*] parse_result_packet(): exception: %s" % (e), sender='Packets')
return (None, None, None, None, None, None, None)
def parse_result_packets(packets):
"""
Parse a blob of one or more result packets
"""
resultPackets = []
# parse the first result packet
(responseName, totalPacket, packetNum, taskID, length, data, remainingData) = parse_result_packet(packets)
if responseName and responseName != '':
resultPackets.append( (responseName, totalPacket, packetNum, taskID, length, data) )
# advance past the 12-byte packet header plus the decoded data length
offset = 12 + length
while remainingData and remainingData != '':
# parse any additional result packets
(responseName, totalPacket, packetNum, taskID, length, data, remainingData) = parse_result_packet(packets, offset=offset)
if responseName and responseName != '':
resultPackets.append( (responseName, totalPacket, packetNum, taskID, length, data) )
offset += 12 + length
return resultPackets
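# A round-trip sketch for the parsers above: agents base64-encode the data
# field of result packets, so the hand-built packet below does too (the type,
# task ID and payload are illustrative).
def _example_parse_single_result():
    data = base64.b64encode('hello')
    packet = struct.pack('=HHHHL', PACKET_NAMES['TASK_SHELL'], 1, 1, 7,
                         len(data)) + data
    return parse_result_packets(packet)
    # -> [('TASK_SHELL', 1, 1, 7, 8, 'hello')]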
def parse_routing_packet(stagingKey, data):
"""
Decodes the rc4 "routing packet" and parses raw agent data into:
{sessionID : (language, meta, additional, [encData]), ...}
Routing packet format:
+---------+-------------------+--------------------------+
| RC4 IV | RC4s(RoutingData) | AESc(client packet data) | ...
+---------+-------------------+--------------------------+
| 4 | 16 | RC4 length |
+---------+-------------------+--------------------------+
RC4s(RoutingData):
+-----------+------+------+-------+--------+
| SessionID | Lang | Meta | Extra | Length |
+-----------+------+------+-------+--------+
| 8 | 1 | 1 | 2 | 4 |
+-----------+------+------+-------+--------+
"""
if data:
results = {}
offset = 0
# ensure we have at least the 20 bytes for a routing packet
if len(data) >= 20:
while True:
if len(data) - offset < 20:
break
RC4IV = data[0+offset:4+offset]
RC4data = data[4+offset:20+offset]
routingPacket = encryption.rc4(RC4IV+stagingKey, RC4data)
sessionID = routingPacket[0:8]
# B == 1 byte unsigned char, H == 2 byte unsigned short, L == 4 byte unsigned long
(language, meta, additional, length) = struct.unpack("=BBHL", routingPacket[8:])
if length < 0:
dispatcher.send('[*] parse_routing_packet(): length in decoded rc4 packet is < 0', sender='Packets')
encData = None
else:
encData = data[(20+offset):(20+offset+length)]
results[sessionID] = (LANGUAGE_IDS.get(language, 'NONE'), META_IDS.get(meta, 'NONE'), ADDITIONAL_IDS.get(additional, 'NONE'), encData)
# check if we're at the end of the packet processing
remainingData = data[20+offset+length:]
if not remainingData or remainingData == '':
break
offset += 20 + length
return results
else:
dispatcher.send("[*] parse_agent_data() data length incorrect: %s" % (len(data)), sender='Packets')
return None
else:
dispatcher.send("[*] parse_agent_data() data is None", sender='Packets')
return None
def build_routing_packet(stagingKey, sessionID, language, meta="NONE", additional="NONE", encData=''):
"""
Takes the specified parameters for an RC4 "routing packet" and builds/returns
an HMAC'ed RC4 "routing packet".
packet format:
Routing Packet:
+---------+-------------------+--------------------------+
| RC4 IV | RC4s(RoutingData) | AESc(client packet data) | ...
+---------+-------------------+--------------------------+
| 4 | 16 | RC4 length |
+---------+-------------------+--------------------------+
RC4s(RoutingData):
+-----------+------+------+-------+--------+
| SessionID | Lang | Meta | Extra | Length |
+-----------+------+------+-------+--------+
| 8 | 1 | 1 | 2 | 4 |
+-----------+------+------+-------+--------+
"""
# binary pack all of the passed config values as unsigned numbers
# B == 1 byte unsigned char, H == 2 byte unsigned short, L == 4 byte unsigned long
data = sessionID + struct.pack("=BBHL", LANGUAGE.get(language.upper(), 0), META.get(meta.upper(), 0), ADDITIONAL.get(additional.upper(), 0), len(encData))
# RC4IV = os.urandom(4)
RC4IV = Random.new().read(4)
stagingKey = str(stagingKey)
key = RC4IV + stagingKey
rc4EncData = encryption.rc4(key, data)
# return the RC4 IV plus the RC4-encrypted routing data, followed by the AES-encrypted client data
packet = RC4IV + rc4EncData + encData
return packet
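# A round-trip sketch (relies on the sibling `encryption` module imported
# above): a routing header built with a staging key can be recovered with the
# same key. The key and session ID values are illustrative.
def _example_routing_round_trip():
    staging_key = 'Q}f:yF+5hRoVN8LWvXDyvpxmrZNuoZI~'
    packet = build_routing_packet(staging_key, 'AAAABBBB', 'POWERSHELL',
                                  meta='TASKING_REQUEST')
    return parse_routing_packet(staging_key, packet)
    # -> {'AAAABBBB': ('POWERSHELL', 'TASKING_REQUEST', 'NONE', '')}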
def resolve_id(PacketID):
"""
Resolve a packet ID to its key.
"""
try:
return PACKET_IDS[int(PacketID)]
except:
return PACKET_IDS[0]
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C)2005-2014 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import os
import time
import urllib
from abc import ABCMeta, abstractmethod
from genshi.builder import tag
from trac.config import BoolOption, ConfigurationError, IntOption, Option
from trac.core import *
from trac.db.pool import ConnectionPool
from trac.db.schema import Table
from trac.db.util import ConnectionWrapper
from trac.util.concurrency import ThreadLocal
from trac.util.text import unicode_passwd
from trac.util.translation import _, tag_
def with_transaction(env, db=None):
"""Function decorator to emulate a context manager for database
transactions.
>>> def api_method(p1, p2):
>>> result[0] = value1
>>> @with_transaction(env)
>>> def implementation(db):
>>> # implementation
>>> result[0] = value2
>>> return result[0]
In this example, the `implementation()` function is called
automatically right after its definition, with a database
connection as an argument. If the function completes, a COMMIT is
issued on the connection. If the function raises an exception, a
ROLLBACK is issued and the exception is re-raised. Nested
transactions are supported, and a COMMIT will only be issued when
the outermost transaction block in a thread exits.
This mechanism is intended to replace the former practice of
getting a database connection with `env.get_db_cnx()` and issuing
an explicit commit or rollback, for mutating database
accesses. Its automatic handling of commit, rollback and nesting
makes it much more robust.
The optional `db` argument is intended for legacy code and should
not be used in new code.
:deprecated: This decorator is in turn deprecated in favor of
context managers now that python 2.4 support has been
dropped. It will be removed in Trac 1.3.1. Use instead
the new context managers, `QueryContextManager` and
`TransactionContextManager`, which make the code much
simpler to write:
>>> def api_method(p1, p2):
>>> result = value1
>>> with env.db_transaction as db:
>>> # implementation
>>> result = value2
>>> return result
"""
dbm = DatabaseManager(env)
_transaction_local = dbm._transaction_local
def transaction_wrapper(fn):
ldb = _transaction_local.wdb
if db is not None:
if ldb is None:
_transaction_local.wdb = db
try:
fn(db)
finally:
_transaction_local.wdb = None
else:
assert ldb is db, "Invalid transaction nesting"
fn(db)
elif ldb:
fn(ldb)
else:
ldb = _transaction_local.wdb = dbm.get_connection()
try:
fn(ldb)
ldb.commit()
_transaction_local.wdb = None
except:
_transaction_local.wdb = None
ldb.rollback()
ldb = None
raise
return transaction_wrapper
class DbContextManager(object):
"""Database Context Manager
The outermost `DbContextManager` will close the connection.
"""
db = None
def __init__(self, env):
self.dbmgr = DatabaseManager(env)
def execute(self, query, params=None):
"""Shortcut for directly executing a query."""
with self as db:
return db.execute(query, params)
__call__ = execute
def executemany(self, query, params=None):
"""Shortcut for directly calling "executemany" on a query."""
with self as db:
return db.executemany(query, params)
class TransactionContextManager(DbContextManager):
"""Transactioned Database Context Manager for retrieving a
`~trac.db.util.ConnectionWrapper`.
The outermost such context manager will perform a commit upon
normal exit or a rollback after an exception.
"""
def __enter__(self):
db = self.dbmgr._transaction_local.wdb # outermost writable db
if not db:
db = self.dbmgr._transaction_local.rdb # reuse wrapped connection
if db:
db = ConnectionWrapper(db.cnx, db.log)
else:
db = self.dbmgr.get_connection()
self.dbmgr._transaction_local.wdb = self.db = db
return db
def __exit__(self, et, ev, tb):
if self.db:
self.dbmgr._transaction_local.wdb = None
if et is None:
self.db.commit()
else:
self.db.rollback()
if not self.dbmgr._transaction_local.rdb:
self.db.close()
class QueryContextManager(DbContextManager):
"""Database Context Manager for retrieving a read-only
`~trac.db.util.ConnectionWrapper`.
"""
def __enter__(self):
db = self.dbmgr._transaction_local.rdb # outermost readonly db
if not db:
db = self.dbmgr._transaction_local.wdb # reuse wrapped connection
if db:
db = ConnectionWrapper(db.cnx, db.log, readonly=True)
else:
db = self.dbmgr.get_connection(readonly=True)
self.dbmgr._transaction_local.rdb = self.db = db
return db
def __exit__(self, et, ev, tb):
if self.db:
self.dbmgr._transaction_local.rdb = None
if not self.dbmgr._transaction_local.wdb:
self.db.close()
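# A minimal usage sketch (hypothetical helper, not part of the Trac API):
# read-only access goes through `env.db_query`, writes through
# `env.db_transaction`; nesting reuses the outermost connection as
# implemented above.
def _example_read_system_table(env):
    with env.db_query as db:
        return db("SELECT name, value FROM system")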
class ConnectionBase(object):
"""Abstract base class for database connection classes."""
__metaclass__ = ABCMeta
@abstractmethod
def cast(self, column, type):
"""Returns a clause casting `column` as `type`."""
pass
@abstractmethod
def concat(self, *args):
"""Returns a clause concatenating the sequence `args`."""
pass
@abstractmethod
def drop_table(self, table):
"""Drops the `table`."""
pass
@abstractmethod
def get_column_names(self, table):
"""Returns the list of the column names in `table`."""
pass
@abstractmethod
def get_last_id(self, cursor, table, column='id'):
"""Returns the current value of the primary key sequence for `table`.
The `column` of the primary key may be specified, which defaults
to `id`."""
pass
@abstractmethod
def get_table_names(self):
"""Returns a list of the table names."""
pass
@abstractmethod
def like(self):
"""Returns a case-insensitive `LIKE` clause."""
pass
@abstractmethod
def like_escape(self, text):
"""Returns `text` escaped for use in a `LIKE` clause."""
pass
@abstractmethod
def prefix_match(self):
"""Return a case sensitive prefix-matching operator."""
pass
@abstractmethod
def prefix_match_value(self, prefix):
"""Return a value for case sensitive prefix-matching operator."""
pass
@abstractmethod
def quote(self, identifier):
"""Returns the quoted `identifier`."""
pass
@abstractmethod
def reset_tables(self):
"""Deletes all data from the tables and resets autoincrement indexes.
:return: list of names of the tables that were reset.
"""
pass
@abstractmethod
def update_sequence(self, cursor, table, column='id'):
"""Updates the current value of the primary key sequence for `table`.
The `column` of the primary key may be specified, which defaults
to `id`."""
pass
class IDatabaseConnector(Interface):
"""Extension point interface for components that support the
connection to relational databases.
"""
def get_supported_schemes():
"""Return the connection URL schemes supported by the
connector, and their relative priorities as an iterable of
`(scheme, priority)` tuples.
If `priority` is a negative number, this is indicative of an
error condition with the connector. An error message should be
attached to the `error` attribute of the connector.
"""
def get_connection(path, log=None, **kwargs):
"""Create a new connection to the database."""
def get_exceptions():
"""Return an object (typically a module) containing all the
backend-specific exception types as attributes, named
according to the Python Database API
(http://www.python.org/dev/peps/pep-0249/).
"""
def init_db(path, schema=None, log=None, **kwargs):
"""Initialize the database."""
def destroy_db(self, path, log=None, **kwargs):
"""Destroy the database."""
def to_sql(table):
"""Return the DDL statements necessary to create the specified
table, including indices."""
def backup(dest):
"""Backup the database to a location defined by
trac.backup_dir"""
class DatabaseManager(Component):
"""Component used to manage the `IDatabaseConnector` implementations."""
connectors = ExtensionPoint(IDatabaseConnector)
connection_uri = Option('trac', 'database', 'sqlite:db/trac.db',
"""Database connection
[wiki:TracEnvironment#DatabaseConnectionStrings string] for this
project""")
backup_dir = Option('trac', 'backup_dir', 'db',
"""Database backup location""")
timeout = IntOption('trac', 'timeout', '20',
"""Timeout value for database connection, in seconds.
Use '0' to specify ''no timeout''.""")
debug_sql = BoolOption('trac', 'debug_sql', False,
"""Show the SQL queries in the Trac log, at DEBUG level.
""")
def __init__(self):
self._cnx_pool = None
self._transaction_local = ThreadLocal(wdb=None, rdb=None)
def init_db(self):
connector, args = self.get_connector()
from trac.db_default import schema
args['schema'] = schema
connector.init_db(**args)
def destroy_db(self):
connector, args = self.get_connector()
connector.destroy_db(**args)
self.shutdown()
def create_tables(self, schema):
"""Create the specified tables.
:param schema: an iterable of table objects.
:since: version 1.0.2
"""
connector = self.get_connector()[0]
with self.env.db_transaction as db:
for table in schema:
for sql in connector.to_sql(table):
db(sql)
def drop_tables(self, schema):
"""Drop the specified tables.
:param schema: an iterable of `Table` objects or table names.
:since: version 1.0.2
"""
with self.env.db_transaction as db:
for table in schema:
table_name = table.name if isinstance(table, Table) else table
db.drop_table(table_name)
def insert_into_tables(self, data_or_callable):
"""Insert data into existing tables.
:param data_or_callable: Nested tuples of table names, column names
and row data:
(table1,
(column1, column2),
((row1col1, row1col2), (row2col1, row2col2)),
table2, ...)
or a callable that takes a single parameter
`db` and returns the aforementioned nested
tuple.
:since: version 1.1.3
"""
with self.env.db_transaction as db:
data = data_or_callable(db) if callable(data_or_callable) \
else data_or_callable
for table, cols, vals in data:
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols),
','.join(['%s'] * len(cols))), vals)
def reset_tables(self):
"""Deletes all data from the tables and resets autoincrement indexes.
:return: list of names of the tables that were reset.
:since: version 1.1.3
"""
with self.env.db_transaction as db:
return db.reset_tables()
def get_connection(self, readonly=False):
"""Get a database connection from the pool.
If `readonly` is `True`, the returned connection will purposely
lack the `rollback` and `commit` methods.
"""
if not self._cnx_pool:
connector, args = self.get_connector()
self._cnx_pool = ConnectionPool(5, connector, **args)
db = self._cnx_pool.get_cnx(self.timeout or None)
if readonly:
db = ConnectionWrapper(db, readonly=True)
return db
def get_database_version(self, name='database_version'):
"""Returns the database version from the SYSTEM table as an int,
or `False` if the entry is not found.
:param name: The name of the entry that contains the database version
in the SYSTEM table. Defaults to `database_version`,
which contains the database version for Trac.
"""
rows = self.env.db_query("""
SELECT value FROM system WHERE name=%s
""", (name,))
return int(rows[0][0]) if rows else False
def get_exceptions(self):
return self.get_connector()[0].get_exceptions()
def get_table_names(self):
"""Returns a list of the table names.
:since: 1.1.6
"""
with self.env.db_query as db:
return db.get_table_names()
def set_database_version(self, version, name='database_version'):
"""Sets the database version in the SYSTEM table.
:param version: an integer database version.
:param name: The name of the entry that contains the database version
in the SYSTEM table. Defaults to `database_version`,
which contains the database version for Trac.
"""
current_database_version = self.get_database_version(name)
if current_database_version is False:
self.env.db_transaction("""
INSERT INTO system (name, value) VALUES (%s, %s)
""", (name, version))
else:
self.env.db_transaction("""
UPDATE system SET value=%s WHERE name=%s
""", (version, name))
self.log.info("Upgraded %s from %d to %d",
name, current_database_version, version)
def needs_upgrade(self, version, name='database_version'):
"""Checks the database version to determine if an upgrade is needed.
:param version: the expected integer database version.
:param name: the name of the entry in the SYSTEM table that contains
the database version. Defaults to `database_version`,
which contains the database version for Trac.
:return: `True` if the stored version is less than the expected
version, `False` if it is equal to the expected version.
:raises TracError: if the stored version is greater than the expected
version.
"""
dbver = self.get_database_version(name)
if dbver == version:
return False
elif dbver > version:
raise TracError(_("Need to downgrade %(name)s.", name=name))
self.log.info("Need to upgrade %s from %d to %d",
name, dbver, version)
return True
def upgrade(self, version, name='database_version', pkg=None):
"""Invokes `do_upgrade(env, version, cursor)` in module
`"%s/db%i.py" % (pkg, version)`, for each required version upgrade.
:param version: the expected integer database version.
:param name: the name of the entry in the SYSTEM table that contains
the database version. Defaults to `database_version`,
which contains the database version for Trac.
:param pkg: the package containing the upgrade modules.
:raises TracError: if the package or module doesn't exist.
"""
dbver = self.get_database_version(name)
for i in range(dbver + 1, version + 1):
module = 'db%i' % i
try:
upgrades = __import__(pkg, globals(), locals(), [module])
except ImportError:
raise TracError(_("No upgrade package %(pkg)s", pkg=pkg))
try:
script = getattr(upgrades, module)
except AttributeError:
raise TracError(_("No upgrade module %(module)s.py",
module=module))
with self.env.db_transaction as db:
cursor = db.cursor()
script.do_upgrade(self.env, i, cursor)
self.set_database_version(i, name)
def shutdown(self, tid=None):
if self._cnx_pool:
self._cnx_pool.shutdown(tid)
if not tid:
self._cnx_pool = None
def backup(self, dest=None):
"""Save a backup of the database.
:param dest: base filename to write to.
Returns the file actually written.
"""
connector, args = self.get_connector()
if not dest:
backup_dir = self.backup_dir
if not os.path.isabs(backup_dir):
backup_dir = os.path.join(self.env.path, backup_dir)
db_str = self.config.get('trac', 'database')
db_name, db_path = db_str.split(":", 1)
dest_name = '%s.%i.%d.bak' % (db_name, self.env.database_version,
int(time.time()))
dest = os.path.join(backup_dir, dest_name)
else:
backup_dir = os.path.dirname(dest)
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
return connector.backup(dest)
def get_connector(self):
scheme, args = parse_connection_uri(self.connection_uri)
candidates = [
(priority, connector)
for connector in self.connectors
for scheme_, priority in connector.get_supported_schemes()
if scheme_ == scheme
]
if not candidates:
raise TracError(_('Unsupported database type "%(scheme)s"',
scheme=scheme))
priority, connector = max(candidates)
if priority < 0:
raise TracError(connector.error)
if scheme == 'sqlite':
if args['path'] == ':memory:':
# Special case for SQLite in-memory database, always get
# the /same/ connection over
pass
elif not os.path.isabs(args['path']):
# Special case for SQLite to support a path relative to the
# environment directory
args['path'] = os.path.join(self.env.path,
args['path'].lstrip('/'))
if self.debug_sql:
args['log'] = self.log
return connector, args
_get_connector = get_connector # For 0.11 compatibility
def get_column_names(cursor):
"""Retrieve column names from a cursor, if possible."""
return [unicode(d[0], 'utf-8') if isinstance(d[0], str) else d[0]
for d in cursor.description] if cursor.description else []
def parse_connection_uri(db_str):
"""Parse the database connection string.
The database connection string for an environment is specified through
the `database` option in the `[trac]` section of trac.ini.
:return: a tuple containing the scheme and a dictionary of attributes:
`user`, `password`, `host`, `port`, `path`, `params`.
:since: 1.1.3
"""
if not db_str:
section = tag.a("[trac]",
title=_("TracIni documentation"),
class_='trac-target-new',
href='http://trac.edgewall.org/wiki/TracIni'
'#trac-section')
raise ConfigurationError(
tag_("Database connection string is empty. Set the %(option)s "
"configuration option in the %(section)s section of "
"trac.ini. Please refer to the %(doc)s for help.",
option=tag.code("database"), section=section,
doc=_doc_db_str()))
try:
scheme, rest = db_str.split(':', 1)
except ValueError:
raise _invalid_db_str(db_str)
if not rest.startswith('/'):
if scheme == 'sqlite' and rest:
# Support for relative and in-memory SQLite connection strings
host = None
path = rest
else:
raise _invalid_db_str(db_str)
else:
if not rest.startswith('//'):
host = None
rest = rest[1:]
elif rest.startswith('///'):
host = None
rest = rest[3:]
else:
rest = rest[2:]
if '/' in rest:
host, rest = rest.split('/', 1)
else:
host = rest
rest = ''
path = None
if host and '@' in host:
user, host = host.split('@', 1)
if ':' in user:
user, password = user.split(':', 1)
else:
password = None
if user:
user = urllib.unquote(user)
if password:
password = unicode_passwd(urllib.unquote(password))
else:
user = password = None
if host and ':' in host:
host, port = host.split(':', 1)
try:
port = int(port)
except ValueError:
raise _invalid_db_str(db_str)
else:
port = None
if not path:
path = '/' + rest
if os.name == 'nt':
# Support local paths containing drive letters on Win32
if len(rest) > 1 and rest[1] == '|':
path = "%s:%s" % (rest[0], rest[2:])
params = {}
if '?' in path:
path, qs = path.split('?', 1)
qs = qs.split('&')
for param in qs:
try:
name, value = param.split('=', 1)
except ValueError:
raise _invalid_db_str(db_str)
value = urllib.unquote(value)
params[name] = value
args = zip(('user', 'password', 'host', 'port', 'path', 'params'),
(user, password, host, port, path, params))
return scheme, dict([(key, value) for key, value in args if value])
# Compatibility for Trac < 1.1.3. Will be removed in 1.3.1.
_parse_db_str = parse_connection_uri
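def _example_parse_connection_uri():
    """A worked sketch of the parsing above (illustrative credentials)."""
    return parse_connection_uri(
        'postgres://tracuser:pw@localhost:5432/trac?schema=public')
    # -> ('postgres', {'user': 'tracuser', 'password': u'pw',
    #     'host': 'localhost', 'port': 5432, 'path': '/trac',
    #     'params': {'schema': 'public'}})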
def _invalid_db_str(db_str):
return ConfigurationError(
tag_("Invalid format %(db_str)s for the database connection string. "
"Please refer to the %(doc)s for help.",
db_str=tag.code(db_str), doc=_doc_db_str()))
def _doc_db_str():
return tag.a(_("documentation"),
title=_("Database Connection Strings documentation"),
class_='trac-target-new',
href='http://trac.edgewall.org/wiki/'
'TracIni#DatabaseConnectionStrings')
|
|
#!/usr/bin/python
# TBD: d7m5
# TBD: complexity
# TBD: w+b, w-b
# TBD: percentile
# DONE: alt_scores, mean of score in each block
# DONE: final score
# DONE: index of 1st move where ahead X (100)
# DONE debug final_score
# DONE mean/median/dev raw score
import chess_util
import cjson
import gflags
import glob
import leveldb
import numpy
import os.path
import sets
import sys
from collections import namedtuple
FLAGS = gflags.FLAGS
gflags.DEFINE_string('analysis', 'd19.leveldb', ("""Analysis database.
Key = 'simple FEN' """ ))
gflags.DEFINE_string('game_stages', 'generated/game_stages.csv', '')
gflags.DEFINE_string('model_dir', '.', '')
gflags.DEFINE_integer('limit', 1000, '')
gflags.DEFINE_string('key_prefix', '', 'Something like d19_ to keep track of the analysis this came from')
gflags.DEFINE_bool('debug', False, '')
gflags.DEFINE_bool('verbose', False, 'More verbose data in output such as raw arrays')
# White/Black multiplier to convert scores back to white-based scores
kColorMul = { 0: 1,
1: -1}
files = 0
game_stages = {} # [event] = (mg ply, eg ply)
# return dict of key=event, value=tuple (first middlegame ply, first endgame ply)
def ReadGameStages():
res = {}
for line in file(FLAGS.game_stages).read().splitlines():
ar = line.split(',')
res[ar[0]] = (int(ar[1]), int(ar[2]))
return res
def safe_max(ar):
if len(ar) == 0:
return 0
return max(ar)
def avg(ar):
n = len(ar)
if n == 0:
return 0
return sum(ar) / float(len(ar))
ParseResult = {
'1-0': 1.0,
'0-1': -1.0,
'1/2-1/2': 0.0
}
ParseResultFlip = {
'1-0': -1.0,
'0-1': 1.0,
'1/2-1/2': 0.0
}
class Position(object):
def __init__(self, mp):
self._map = mp
fen = property(lambda me: me._map['fen'])
move = property(lambda me: me._map['move'])
san = property(lambda me: me._map['san'])
ply = property(lambda me: me._map['ply'])
num_legal_moves = property(lambda me: me._map['num_legal_moves'])
class Game(object):
def __init__(self, f):
self._map = cjson.decode(f.read())
positions = property(lambda me: (Position(pos) for pos in me._map['positions']))
event = property(lambda me: me._map['event'])
black_elo = property(lambda me: me._map.get('black_elo', 0))
white_elo = property(lambda me: me._map.get('white_elo', 0))
result = property(lambda me: me._map['result'])
class GameAnalysis(object):
def __init__(self, mp):
self._map = mp
# Final depth
depth = property(lambda me: me._map['depth'])
moves = property(lambda me: me._map['moves'])
# Analysis of all moves.
# There can be multiple entries at the same depth.
analysis = property(lambda me: (Analysis(a) for a in me._map['analysis']))
extra = property(lambda me: me._map['extra'])
# Analysis of one move (pv[0]) at one depth
class Analysis(object):
def __init__(self, mp):
self._map = mp
depth = property(lambda me: me._map['depth'])
score = property(lambda me: me._map['score'])
pv = property(lambda me: me._map['pv'])
nodes = property(lambda me: me._map['nodes'])
multipv = property(lambda me: me._map['multipv'])
# Static info about a game
GameInfo = namedtuple('GameInfo', ['event',
'white_elo',
'black_elo',
'co_elo',
'result',
'co_result',
'raw_scores',
'co_deltas_op',
'co_deltas_mg',
'co_deltas_eg',
'co_deltas',
'co_scores',
'first_loss',
'final_score',
'co_ply_ahead_50',
'co_ply_ahead_100',
'alt_stages',
'complexity',
'ranks'
])
# yields ply, co, position, analysis
def GenerateAnalysis(db, game):
for ply, pos in enumerate(game.positions):
co = ply % 2
simple = chess_util.SimplifyFen(pos.fen)
try:
raw = db.Get(simple)
if False:
obj = cjson.decode(raw)
for n in sorted(obj.keys()):
print 'raw: ', n, obj[n]
for ent in obj['analysis']:
print '\t', ent
except KeyError:
continue
simple_pos = simple.split(' ')[0]
analysis = GameAnalysis(cjson.decode(raw))
yield ply, co, pos, analysis
def FindBestLine(analysis):
move_map = {} # key=move value=score
best_line = None
for i, line in enumerate(analysis.analysis):
if line.depth != analysis.depth:
continue
# First line found at target depth must be the best
if best_line is None:
best_line = line
move_map[line.pv[0]] = line.score
return (best_line, move_map)
def CalculateComplexity(game, mega):
complexity = [[], []]
for ply, co, pos, analysis in mega:
#(best_line, move_map) = FindBestLine(analysis) # wasteful redundant call
# line: type Analysis
#print 'XXX ply=', ply, 'ad=', analysis.depth
lastd = 0
lastm = None
lasts = 0
running = []
for line in analysis.analysis:
if line.multipv != 1:
continue
if line.depth <= lastd:
continue # we are looping onto a block that is not the best move
lastd = line.depth
if lastm is not None and line.pv[0] != lastm:
running.append(abs(lasts - line.score))
lastm = line.pv[0]
lasts = line.score
#if line.pv[0] != best_line.pv[0]:
#continue
#print 'a: d=', line.depth, 'm', line.multipv, 's', line.score, line.pv[0]
#print '\t', running
if len(running) == 0:
complexity[co].append(0)
else:
complexity[co].append(numpy.mean(running))
return complexity
def CalculateBestRankHere(game, pos, analysis):
found_depth = False
for line in analysis.analysis:
if line.depth != analysis.depth:
if found_depth:
return 0
continue
found_depth = True
if line.pv[0] == pos.move:
return line.multipv
return 0
def CalculateBestRankAgreement(game, mega):
br = [[], []]
for ply, co, pos, analysis in mega:
br[co].append(CalculateBestRankHere(game, pos, analysis))
return br
def CalculateDeltasAndScores(game, mega, stages):
#print 'st', stages
raw_scores = [[], []]
deltas = [[], []]
deltas_opening = [[], []]
deltas_midgame = [[], []]
deltas_endgame = [[], []]
scores = [[], []]
final_score = 0
co_ply_ahead_50 = [0, 0]
co_ply_ahead_100 = [0, 0]
for ply, co, pos, analysis in mega:
(best_line, move_map) = FindBestLine(analysis)
best_move = best_line.pv[0]
final_score = kColorMul[co] * move_map[best_move]
raw_scores[co].append(final_score)
if FLAGS.debug:
print
print 'Analysis: ply=', ply, ' co=', co
for a in analysis.analysis:
print '\t', a.multipv, a.depth, a.score, a.pv
print 'final: ', final_score
if FLAGS.debug:
print 'DBG: ',co, game.event, best_move, ply, pos.move, move_map[best_move], move_map[pos.move]
if co_ply_ahead_50[co] == 0 and move_map[pos.move] >= 50:
co_ply_ahead_50[co] = ply
if co_ply_ahead_100[co] == 0 and move_map[pos.move] >= 100:
co_ply_ahead_100[co] = ply
scores[co].append([move_map[best_move], move_map[pos.move]])
if ply <= stages[0]:
delta2 = deltas_opening
elif ply <= stages[1]:
delta2 = deltas_midgame
else:
delta2 = deltas_endgame
if pos.move == best_move:
deltas[co].append(0)
delta2[co].append(0)
else:
delta = move_map[best_move] - move_map[pos.move]
if delta == 0:
# Regan gives a correction of -0.03 if a move of equal score was chosen
# that wasn't the 1st-ranked one.
deltas[co].append(3)
delta2[co].append(3)
else:
deltas[co].append(max(3, delta))
delta2[co].append(max(3, delta))
if FLAGS.debug:
print 'Scores[0]: ', scores[0]
print 'Deltas[0]: ', deltas[0]
print 'Scores[1]: ', scores[1]
print 'Deltas[1]: ', deltas[1]
return raw_scores, deltas, deltas_opening, deltas_midgame, deltas_endgame, scores, final_score, co_ply_ahead_50, co_ply_ahead_100
def CalculateAltStages(deltas):
# split each color's deltas into five 10-ply blocks (plies 0-9, 10-19, ...)
res = [ {}, {} ]
for co in [0, 1]:
for block in [0, 1, 2, 3, 4]:
res[co][block] = deltas[co][block * 10 : (1 + block) * 10]
return res
def CalculateFirstLoss(deltas):
(first_loss_100, first_loss_200, first_loss_300) = (0, 0, 0)
for ply, delta in enumerate(deltas):
if delta >= 100 and first_loss_100 == 0:
first_loss_100 = ply
if delta >= 200 and first_loss_200 == 0:
first_loss_200 = ply
if delta >= 300 and first_loss_300 == 0:
first_loss_300 = ply
return (first_loss_100, first_loss_200, first_loss_300)
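# Worked example (made-up deltas): deltas = [0, 120, 40, 250] yields
# (1, 3, 0): first >=100 loss at ply 1, first >=200 loss at ply 3, and no
# >=300 loss (0 doubles as "never", so a loss at ply 0 is indistinguishable).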
def StudyGame(db, fn):
global game_stages
with open(fn) as f:
game = Game(f)
stages = game_stages[game.event]
mega = list(GenerateAnalysis(db, game))
complexity = CalculateComplexity(game, mega)
ranks = CalculateBestRankAgreement(game, mega)
(raw_scores, deltas, deltas_op, deltas_mg, deltas_eg, scores, final_score, co_ply_ahead_50, co_ply_ahead_100) = CalculateDeltasAndScores(game, mega, stages)
alt_stages = CalculateAltStages(deltas)
first_loss = [CalculateFirstLoss(deltas[0]), CalculateFirstLoss(deltas[1])]
return GameInfo(final_score = final_score,
result = ParseResult[game.result],
co_result = [ParseResult[game.result],
ParseResultFlip[game.result]],
complexity = complexity,
ranks = ranks,
raw_scores = raw_scores,
alt_stages = alt_stages,
co_ply_ahead_50 = co_ply_ahead_50,
co_ply_ahead_100 = co_ply_ahead_100,
first_loss = first_loss,
co_deltas_op = deltas_op,
co_deltas_mg = deltas_mg,
co_deltas_eg = deltas_eg,
co_deltas = deltas,
co_scores = scores,
event = game.event,
white_elo = game.white_elo,
black_elo = game.black_elo,
co_elo = [game.white_elo,
game.black_elo])
def ProcessArgs(db, limit, argv):
global files
if len(argv) > 0:
yield StudyGame(db, argv[0])
else:
for event in range(1, limit + 1):
fn = 'generated/game2json/%05d.json' % event
yield StudyGame(db, fn)
files += 1
if files >= limit:
break
def ReadOpeningPositions(fn):
res = set()
with open(fn) as f:
for line in (line.strip() for line in f.readlines()):
ar = line.split(',')
res.add(ar[1].split(' ')[0]) # just position part of FEN
return frozenset(res) # frozenset replaces the deprecated sets.ImmutableSet
def ProcessDeltas(gi, co, deltas):
dampened_deltas = [min(300, delta) for delta in deltas]
(delta_median, delta_stddev, delta_avg) = (0, 0, 0)
if len(dampened_deltas) > 0:
delta_avg = numpy.mean(dampened_deltas)
delta_median = numpy.median(dampened_deltas)
delta_stddev = numpy.std(dampened_deltas)
return (delta_median, delta_stddev, delta_avg)
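# Worked example (made-up deltas): ProcessDeltas(gi, co, [0, 100, 500]) damps
# the 500 to 300 and returns roughly (median=100, stddev~124.7, avg~133.3).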
def main(argv):
global game_stages
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
game_stages = ReadGameStages()
db = leveldb.LevelDB(FLAGS.analysis, max_open_files=100)
for gi_num, gi in enumerate(ProcessArgs(db, FLAGS.limit, argv[1:])):
if FLAGS.debug:
print
print "##### gi_num: ", gi_num, " gi: ", gi
#print gi.raw_scores[0]
#print gi.raw_scores[1]
print
for co in [0, 1]:
(_, _, delta_avg_op) = ProcessDeltas(gi, co, gi.co_deltas_op[co])
(_, _, delta_avg_mg) = ProcessDeltas(gi, co, gi.co_deltas_mg[co])
(_, _, delta_avg_eg) = ProcessDeltas(gi, co, gi.co_deltas_eg[co])
(delta_median, delta_stddev, delta_avg) = ProcessDeltas(gi, co, gi.co_deltas[co])
num_ranks = max(1.0, float(len(gi.ranks[co])))
pct_best = sum(r == 1 for r in gi.ranks[co]) / num_ranks
pct_best2 = sum((r == 1 or r == 2) for r in gi.ranks[co]) / num_ranks
pct_best3 = sum((r == 1 or r == 2 or r == 3) for r in gi.ranks[co]) / num_ranks
if len(gi.raw_scores[co]) == 0:
gi.raw_scores[co] = [0]
if len(gi.complexity[co]) == 0:
gi.complexity[co] = [0]
standard = {
'$g_event': gi.event,
'$g_co_rating': gi.co_elo[co],
'$g_co': ["w", "b"][co],
'result': gi.co_result[co],
'color_value': [1, -1][co],
'delta_max': safe_max(gi.co_deltas[co]),
'delta_avg': delta_avg,
'delta_avg_op': delta_avg_op,
'delta_avg_mg': delta_avg_mg,
'delta_avg_eg': delta_avg_eg,
'delta_median': delta_median,
'delta_stddev': delta_stddev,
'final_score': gi.final_score,
'raw_score_mean': numpy.mean(gi.raw_scores[co]),
'raw_score_median': numpy.median(gi.raw_scores[co]),
'raw_score_stddev': numpy.std(gi.raw_scores[co]),
'ply_ahead_50': gi.co_ply_ahead_50[co],
'ply_ahead_100': gi.co_ply_ahead_100[co],
'first_loss_100': gi.first_loss[co][0],
'first_loss_200': gi.first_loss[co][1],
'first_loss_300': gi.first_loss[co][2],
'complexity': numpy.mean(gi.complexity[co]),
'pct_best': pct_best,
'pct_best2': pct_best2,
'pct_best3': pct_best3,
}
for which in [0, 1, 2, 3, 4]:
key = 'alt_stages_%d' % which
if len(gi.alt_stages[co][which]) == 0:
standard[key] = 0
else:
standard[key] = numpy.mean([min(300, delta) for delta in gi.alt_stages[co][which]])
for which in [0, 1, 2, 3, 4]:
key = 'alt_raw_%d' % which
key2 = 'alt_raw_stddev_%d' % which
chunk = gi.raw_scores[co][which * 10 : (which + 1) * 10]
if len(chunk) == 0:
standard[key] = 0
standard[key2] = 0
else:
standard[key] = numpy.mean(chunk)
standard[key2] = numpy.std(chunk)
if FLAGS.verbose:
standard['$g_co_deltas'] = gi.co_deltas[co]
standard['$g_co_scores'] = gi.co_scores[co]
if FLAGS.key_prefix != '':
standard2 = {}
for n, v in standard.iteritems():
if n[0] == '$' or n == 'result' or n == 'color_value':
standard2[n] = v
else:
standard2[FLAGS.key_prefix + n] = v
standard = standard2
if FLAGS.debug:
for n in sorted(standard.keys()):
print n, standard[n]
print
else:
print cjson.encode(standard)
if __name__ == '__main__':
numpy.seterr(all = 'raise')
main(sys.argv)
|
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import itertools
import math
import os
import re
import time
from oslo_concurrency import processutils as putils
from oslo_utils import excutils
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume Group'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_LE('Unable to locate Volume Group %s') % vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
self.create_thin_pool(pool_name)
else:
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
if lvm_conf and os.path.isfile(lvm_conf):
LVM.LVM_CMD_PREFIX = ['env',
'LC_ALL=C',
'LVM_SYSTEM_DIR=/etc/cinder']
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_vg_uuid(self):
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'uuid', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
return out.split()
else:
return []
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
"""Returns available thin pool free space.
:param vg_name: the vg where the pool is placed
:param thin_pool_name: the thin pool to gather info for
:returns: Free space in GB (float), calculated using data_percent
"""
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error querying thin pool about data_percent'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
return free_space
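# Worked example (hypothetical lvs output "9.51:20.00"): pool_size 9.51g with
# data_percent 20.0 gives consumed space 1.90g and a returned free_space of
# 7.61g.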
@staticmethod
def get_lvm_version(root_helper):
"""Static method to get LVM version from system.
:param root_helper: root_helper to use for execute
:returns: version 3-tuple
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
"""Retrieve info about LVs (all, in a VG, or a single LV).
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:param lv_name: optional, gathers info for only the specified LV
:returns: List of Dictionaries with LV info
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
'-o', 'vg_name,name,size', '--nosuffix']
if lv_name is not None and vg_name is not None:
cmd.append("%s/%s" % (vg_name, lv_name))
elif vg_name is not None:
cmd.append(vg_name)
lvs_start = time.time()
try:
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr:
ctx.reraise = False
msg = _LI("'Not found' when querying LVM info. "
"(vg_name=%(vg)s, lv_name=%(lv)s")
LOG.info(msg, {'vg': vg_name, 'lv': lv_name})
out = None
total_time = time.time() - lvs_start
if total_time > 60:
LOG.warning(_LW('Took %s seconds to get logical volume info.'),
total_time)
lv_list = []
if out is not None:
volumes = out.split()
for vg, name, size in itertools.izip(*[iter(volumes)] * 3):
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self._root_helper,
self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
start_vgs = time.time()
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
total_time = time.time() - start_vgs
if total_time > 60:
LOG.warning(_LW('Took %s seconds to get '
'volume groups.'), total_time)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
LOG.error(_LE('Unable to find VG: %s') % self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
self.vg_free_space = float(vg_list[0]['available'])
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
total_vols_size = 0.0
if self.vg_thin_pool is not None:
# NOTE(xyang): If providing only self.vg_name,
# get_lv_info will output info on the thin pool and all
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg
# stack-vg stack-pool 9.51
# stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
# stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
# stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
#
# If providing both self.vg_name and self.vg_thin_pool,
# get_lv_info will output only info on the thin pool, but not
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg/stack-pool
# stack-vg stack-pool 9.51
#
# We need info on both the thin pool and the volumes,
# therefore we should provide only self.vg_name, but not
# self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
self.vg_name):
lvsize = lv['size']
# get_lv_info runs "lvs" command with "--nosuffix".
# This removes "g" from "1.00g" and only outputs "1.00".
# Running "lvs" command without "--nosuffix" will output
# "1.00g" if "g" is the unit.
# Remove the unit if it is in lv['size'].
if not lv['size'][-1].isdigit():
lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
else:
total_vols_size = total_vols_size + float(lvsize)
total_vols_size = round(total_vols_size, 2)
self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
LOG.debug('Creating thin pool \'%(pool)s\' with size %(size)s of '
'total %(free)sg' % {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
self.vg_thin_pool = name
return size_str
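# Usage sketch (names and sizes hypothetical): for a VG 'stack-vg' with 20g
# free, create_thin_pool() runs roughly
#   lvcreate -T -L 19.0g stack-vg/stack-vg-pool
# and records 'stack-vg-pool' in self.vg_thin_pool.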
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
else:
cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
if mirror_count > 0:
cmd.extend(['-m', mirror_count, '--nosync',
'--mirrorlog', 'mirrored'])
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd.extend(['-R', str(rsize)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_LE("Trying to create snapshot by non-existent LV: %s")
% source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = ['lvcreate', '--name', name,
'--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating snapshot'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise
def _mangle_lv_name(self, name):
# Linux LVM reserves names that start with 'snapshot', so a volume
# with such a name can't be created directly. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
def activate_lv(self, name, is_snapshot=False):
"""Ensure that logical volume/snapshot logical volume is activated.
:param name: Name of LV to activate
:raises: putils.ProcessExecutionError
"""
# This is a no-op if requested for a snapshot on a version
# of LVM that doesn't support snapshot activation.
# (Assume snapshot LV is always active.)
if is_snapshot and not self.supports_snapshot_lv_activation:
return
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
# Must pass --yes to activate both the snap LV and its origin LV.
# Otherwise lvchange asks if you would like to do this interactively,
# and fails.
cmd = ['lvchange', '-a', 'y', '--yes']
if self.supports_lvchange_ignoreskipactivation:
cmd.append('-K')
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating LV'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
def run_udevadm_settle():
self._execute('udevadm', 'settle',
root_helper=self._root_helper, run_as_root=True,
check_exit_code=False)
# LV removal seems to be a race with other writers or udev in
# some cases (see LP #1270192), so we enable retry deactivation
LVM_CONFIG = 'activation { retry_deactivation = 1} '
try:
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
except putils.ProcessExecutionError as err:
mesg = (_('Error reported running lvremove: CMD: %(command)s, '
'RESPONSE: %(response)s') %
{'command': err.cmd, 'response': err.stderr})
LOG.debug(mesg)
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
# The previous failing lvremove -f might leave behind
# suspended devices; when lvmetad is not available, any
# further lvm command will block forever.
# Therefore we need to skip suspended devices on retry.
LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
LOG.debug('Successfully deleted volume: %s after '
'udev settle.', name)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper=self._root_helper,
run_as_root=True)
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
def extend_volume(self, lv_name, new_size):
"""Extend the size of an existing volume."""
try:
self._execute('lvextend', '-L', new_size,
'%s/%s' % (self.vg_name, lv_name),
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending Volume'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise
def vg_mirror_free_space(self, mirror_count):
free_capacity = 0.0
disks = []
for pv in self.pv_list:
disks.append(float(pv['available']))
while True:
disks = sorted([a for a in disks if a > 0.0], reverse=True)
if len(disks) <= mirror_count:
break
# consume the smallest disk
disk = disks[-1]
disks = disks[:-1]
# match extents for each mirror on the largest disks
for index in list(range(mirror_count)):
disks[index] -= disk
free_capacity += disk
return free_capacity
def vg_mirror_size(self, mirror_count):
return (self.vg_free_space / (mirror_count + 1))
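# e.g. with mirror_count=1 half of vg_free_space is usable: each logical
# gigabyte consumes one data copy plus one mirror copy.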
def rename_volume(self, lv_name, new_name):
"""Change the name of an existing volume."""
try:
self._execute('lvrename', self.vg_name, lv_name, new_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error renaming logical volume'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
raise
|
|
import numpy as np
import random
def computeErrorAndCount(A,b,x,group,tol):
error = np.abs(b - np.dot(A,x))
tot_error = 0.0
count = 0
inliers = np.zeros(b.shape,dtype=np.bool)
for j in range(0,len(error),group):
e = 0
for l in range(group):
e += error[j+l]**2
e = np.sqrt(e)
if e < tol:
count += 1
tot_error += e
for l in range(group):
inliers[j+l] = True
if count == 0:
return tot_error,count,inliers
return tot_error/count,count,inliers
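# Worked example (group=1, tol=1.0, made-up data): residuals [0.5, 2.0, 0.3]
# give count=2, mean inlier error 0.4, and inliers [True, False, True].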
##
# Uses the RANSAC algorithm to solve Ax=b
#
# M.A. Fischler and R.C. Bolles. Random sample consensus: A paradigm for
# model fitting with applications to image analysis and automated cartography.
# Communication of Association for Computing Machinery, 24(6): 381--395, 1981.
def RANSAC(A,b,count=None,tol=1.0,niter=None,group=1,verbose=False,full_output=False):
#n = len(y.flatten())
n,k = A.shape
group = int(group)
assert group > 0
assert n % group == 0
assert n >= k
tmp = np.arange(n/group)
if niter is None:
niter = n/group
bestx = np.linalg.lstsq(A,b)[0]
besterror,bestcount,bestinliers = computeErrorAndCount(A,b,bestx,group,tol)
if verbose: print "New Best (LS):",bestcount,besterror,float(bestcount*group)/n
if bestcount == n/group:
if full_output:
return bestx,bestcount,besterror,bestinliers
return bestx
#bestcount = 0
#besterror = 0.0
for _ in xrange(niter):
sample = random.sample(tmp,k/group)
new_sample = []
for j in sample:
for l in range(group):
new_sample.append(group*j+l)
sample = new_sample
ty = b[sample,:]
tX = A[sample,:]
#print tX.shape,ty.shape
try:
x = np.linalg.lstsq(tX,ty)[0]
except np.linalg.LinAlgError: # lstsq failed on this sample; try another
continue
error,count,inliers = computeErrorAndCount(A,b,x,group,tol)
if bestcount < count or (bestcount == count and error < besterror):
bestcount = count
besterror = error
bestx = x
bestinliers = inliers
if verbose: print " New Best:",bestcount,besterror,float(bestcount*group)/n
#print x, count, bestcount
x = bestx
#refine the estimate
#error,count,inliers = computeErrorAndCount(A,b,bestx,group,tol)
inliers = bestinliers
for _ in xrange(10):
ty = b[inliers,:]
tX = A[inliers,:]
try:
x = np.linalg.lstsq(tX,ty)[0]
except np.linalg.LinAlgError:
continue
error,count,inliers = computeErrorAndCount(A,b,x,group,tol)
#if verbose: print " ",error,count,x
if bestcount < count or (bestcount == count and error < besterror):
bestcount = count
besterror = error
bestx = x
bestinliers = inliers
if verbose: print "Improved Best:",bestcount,besterror,float(bestcount*group)/n
#new_inliers = nonzero(abs(b - np.dot(A,x)) < tol)[0]
#if list(new_inliers) == list(inliers):
# break
#inliers = new_inliers
if full_output:
return bestx,bestcount,besterror,bestinliers
return bestx
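# Usage sketch: the demo under __main__ below fits y = 10*x + 5 with one
# gross outlier; RANSAC(A, b, tol=6.0) recovers coefficients close to
# [10, 5] where plain lstsq is dragged off by the outlier.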
def _quantile(errors,quantile):
errors = errors.copy()
i = int(quantile*errors.shape[0])
errors.sort()
return errors[i]
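# e.g. with 40 residuals and quantile=0.75 this returns the value at sorted
# index int(0.75 * 40) == 30.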
##
# Uses the LMedS (least median of squares) algorithm to solve Ax=b
#
# P.J. Rousseeuw. Least median of squares regression.
# Journal of the American Statistical Association, 79(388): 871--880, 1984.
def LMeDs(A,b,quantile=0.75,N = None,verbose=True):
#n = len(y.flatten())
n,k = A.shape
tmp = np.arange(n)
best_sample = tmp
x = bestx = np.linalg.lstsq(A,b)[0]
best_error = _quantile(np.abs(b - np.dot(A,x)),quantile)
#print "LMeDs Error:",best_error
if N is None:
N = n
for i in range(N):
sample = random.sample(tmp,k)
ty = b[sample,:]
tX = A[sample,:]
try:
x = np.linalg.lstsq(tX,ty)[0]
except np.linalg.LinAlgError:
continue
med_error = _quantile(np.abs(b - np.dot(A,x)),quantile)
if med_error < best_error:
#print " Error:",best_error
best_sample = sample
best_error = med_error
bestx = x
#print x, count, bestcount
x = bestx
#refine the estimate using local search
#print " Local Search"
sample = np.zeros([n],dtype=np.bool)
sample[best_sample] = True
best_sample = sample
random.shuffle(tmp)
keep_going = True
while keep_going:
#print " Iter"
keep_going = False
for i in tmp:
sample = best_sample.copy()
sample[i] = not sample[i]
ty = b[sample,:]
tX = A[sample,:]
try:
x = np.linalg.lstsq(tX,ty)[0]
except np.linalg.LinAlgError:
continue
med_error = _quantile(np.abs(b - np.dot(A,x)),quantile)
if med_error < best_error or (med_error == best_error and best_sample.sum() < sample.sum()):
#print " Error:",best_error
keep_going = True
best_sample = sample
best_error = med_error
bestx = x
#inliers = nonzero(np.abs(b - np.dot(A,x)) < tol)[0]
#for i in range(10):
# ty = b[inliers,:]
# tX = A[inliers,:]
# x = np.linalg.lstsq(tX,ty)[0]
# new_inliers = nonzero(np.abs(b - np.dot(A,x)) < tol)[0]
# if list(new_inliers) == list(inliers):
# break
# inliers = new_inliers
return x
if __name__ == '__main__':
A = []
b = []
#print dir(random)
for x in range(40):
b.append( 10*x + 5 + random.normalvariate(0.0,2.0))
A.append([x,1])
A = np.array(A)
b = np.array(b)
b[0] = -20
print np.linalg.lstsq(A,b)[0]
print RANSAC(A,b,tol=6.0)
A = []
b = []
#print dir(random)
for y in range(-10,10):
for x in range(-10,10):
b.append( 15*y + 10*x + 5 + random.normalvariate(0.0,2.0))
A.append([x,y,1])
A = np.array(A)
b = np.array(b)
b[0] = -200000.
print np.linalg.lstsq(A,b)[0]
print RANSAC(A,b,group=2,tol=6,full_output = True,verbose=True)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import atexit
import datetime
import functools
import logging
import os
import re
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
import oslotest.base as oslotest
from oslotest import mockpatch
from paste.deploy import loadwsgi
import six
from sqlalchemy import exc
from testtools import testcase
import webob
# NOTE(ayoung)
# environment.use_eventlet must run before any of the code that will
# call the eventlet monkeypatching.
from keystone.common import environment # noqa
environment.use_eventlet()
from keystone import auth
from keystone.common import config as common_cfg
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
from keystone import config
from keystone import controllers
from keystone import exception
from keystone import notifications
from keystone.policy.backends import rules
from keystone.server import common
from keystone import service
from keystone.tests.unit import ksfixtures
config.configure()
LOG = log.getLogger(__name__)
PID = six.text_type(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = cfg.CONF
log.register_options(CONF)
rules.init()
IN_MEM_DB_CONN_STRING = 'sqlite://'
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
# keystone.common.sql.initialize() for testing.
DEFAULT_TEST_DB_FILE = dirs.tmp('test.db')
class EggLoader(loadwsgi.EggLoader):
_basket = {}
def find_egg_entry_point(self, object_type, name=None):
egg_key = '%s:%s' % (object_type, name)
egg_ep = self._basket.get(egg_key)
if not egg_ep:
egg_ep = super(EggLoader, self).find_egg_entry_point(
object_type, name=name)
self._basket[egg_key] = egg_ep
return egg_ep
# NOTE(dstanek): class paths were removed from the keystone-paste.ini in
# favor of using entry points. This caused tests to slow to a crawl
# since we reload the application object for each RESTful test. This
# monkey-patching adds caching to paste deploy's egg lookup.
loadwsgi.EggLoader = EggLoader
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def generate_paste_config(extension_name):
# Generate a file, based on keystone-paste.ini, that is named:
# extension_name.ini, and includes extension_name in the pipeline
with open(dirs.etc('keystone-paste.ini'), 'r') as f:
contents = f.read()
new_contents = contents.replace(' service_v3',
' %s service_v3' % (extension_name))
new_paste_file = dirs.tmp(extension_name + '.ini')
with open(new_paste_file, 'w') as f:
f.write(new_contents)
return new_paste_file
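# Example (extension name hypothetical): generate_paste_config('my_ext')
# writes <TMPDIR>/my_ext.ini whose pipelines read '... my_ext service_v3'.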
def remove_generated_paste_config(extension_name):
# Remove the generated paste config file, named extension_name.ini
paste_file_to_remove = dirs.tmp(extension_name + '.ini')
os.remove(paste_file_to_remove)
def skip_if_cache_disabled(*sections):
"""This decorator is used to skip a test if caching is disabled.
Caching can be disabled either globally or for a specific section.
In the code fragment::
@skip_if_cache_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
this decorator makes the same assumption as the `should_cache_fn` in
keystone.common.cache that caching should be enabled.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
if not test_obj.identity_api.multiple_domains_supported:
raise testcase.TestSkipped('No multiple domains support')
return f(*args, **kwargs)
return wrapper
class UnexpectedExit(Exception):
pass
class TestClient(object):
def __init__(self, app=None, token=None):
self.app = app
self.token = token
def request(self, method, path, headers=None, body=None):
if headers is None:
headers = {}
if self.token:
headers.setdefault('X-Auth-Token', self.token)
req = webob.Request.blank(path)
req.method = method
for k, v in headers.items():
req.headers[k] = v
if body:
req.body = body
return req.get_response(self.app)
def get(self, path, headers=None):
return self.request('GET', path=path, headers=headers)
def post(self, path, headers=None, body=None):
return self.request('POST', path=path, headers=headers, body=body)
def put(self, path, headers=None, body=None):
return self.request('PUT', path=path, headers=headers, body=body)
def new_ref():
"""Populates a ref with attributes common to some API entities."""
return {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
def new_region_ref():
ref = new_ref()
# Region doesn't have name or enabled.
del ref['name']
del ref['enabled']
ref['parent_region_id'] = None
return ref
def new_service_ref():
ref = new_ref()
ref['type'] = uuid.uuid4().hex
return ref
def new_endpoint_ref(service_id, interface='public', default_region_id=None,
**kwargs):
ref = new_ref()
del ref['enabled'] # enabled is optional
ref['interface'] = interface
ref['service_id'] = service_id
ref['url'] = 'https://' + uuid.uuid4().hex + '.com'
ref['region_id'] = default_region_id
ref.update(kwargs)
return ref
def new_domain_ref():
ref = new_ref()
return ref
def new_project_ref(domain_id=None, parent_id=None, is_domain=False):
ref = new_ref()
ref['domain_id'] = domain_id
ref['parent_id'] = parent_id
ref['is_domain'] = is_domain
return ref
def new_user_ref(domain_id, project_id=None):
ref = new_ref()
ref['domain_id'] = domain_id
ref['email'] = uuid.uuid4().hex
ref['password'] = uuid.uuid4().hex
if project_id:
ref['default_project_id'] = project_id
return ref
def new_group_ref(domain_id):
ref = new_ref()
ref['domain_id'] = domain_id
return ref
def new_credential_ref(user_id, project_id=None, cred_type=None):
ref = dict()
ref['id'] = uuid.uuid4().hex
ref['user_id'] = user_id
if cred_type == 'ec2':
ref['type'] = 'ec2'
ref['blob'] = uuid.uuid4().hex
else:
ref['type'] = 'cert'
ref['blob'] = uuid.uuid4().hex
if project_id:
ref['project_id'] = project_id
return ref
def new_role_ref():
ref = new_ref()
# Roles don't have a description or the enabled flag
del ref['description']
del ref['enabled']
return ref
def new_policy_ref():
ref = new_ref()
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False):
ref = dict()
ref['id'] = uuid.uuid4().hex
ref['trustor_user_id'] = trustor_user_id
ref['trustee_user_id'] = trustee_user_id
ref['impersonation'] = impersonation or False
ref['project_id'] = project_id
ref['remaining_uses'] = remaining_uses
ref['allow_redelegation'] = allow_redelegation
if isinstance(expires, six.string_types):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
return ref
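# Example (IDs hypothetical): new_trust_ref(trustor_id, trustee_id,
# expires={'minutes': 10}) sets 'expires_at' to now+10min rendered with
# TIME_FORMAT; passing a string stores it verbatim.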
class BaseTestCase(oslotest.BaseTestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once the
setup/teardown in TestCase is properly trimmed down to the bare
essentials. This is really just a play to speed up the tests by
eliminating unnecessary work.
"""
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(mockpatch.PatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
warnings.filterwarnings('error', category=DeprecationWarning,
module='^keystone\\.')
warnings.simplefilter('error', exc.SAWarning)
self.addCleanup(warnings.resetwarnings)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
class TestCase(BaseTestCase):
def config_files(self):
return []
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
assert self.__config_overrides_called is False
self.__config_overrides_called = True
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.config_fixture.config(group='oslo_policy',
policy_file=dirs.etc('policy.json'))
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='templated',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='kvs',
backends=[
('keystone.tests.unit.test_kvs.'
'KVSBackendForcedKeyMangleFixture'),
'keystone.tests.unit.test_kvs.KVSBackendFixture'])
self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='signing', certfile=signing_certfile,
keyfile=signing_keyfile,
ca_certs='examples/pki/certs/cacert.pem')
self.config_fixture.config(group='token', driver='kvs')
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
default_log_levels=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
'keystone.common._memcache_pool=INFO',
'keystone.common.ldap=INFO',
])
self.auth_plugin_config_override()
def auth_plugin_config_override(self, methods=None, **method_classes):
if methods is not None:
self.config_fixture.config(group='auth', methods=methods)
common_cfg.setup_authentication()
if method_classes:
self.config_fixture.config(group='auth', **method_classes)
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
self.addCleanup(CONF.reset)
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
# fixture which automatically unregisters options when performing
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(mockpatch.PatchObject(
common_cfg, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
self.config_overrides()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
# without having a CONF object to setup logging. This should help to
# reduce the log size by limiting what we log (similar to how Keystone
# would run under mod_wsgi or eventlet).
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
logger.setLevel(level_name)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(dependency.reset)
self.addCleanup(kvs.INMEMDB.clear)
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
# Reset the auth-plugin registry
self.addCleanup(self.clear_auth_plugin_registry)
self.addCleanup(setattr, controllers, '_VERSIONS', [])
def config(self, config_files):
sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
"""Initializes each manager and assigns them to an attribute."""
# TODO(blk-u): Shouldn't need to clear the registry here, but some
# tests call load_backends multiple times. These should be fixed to
# only call load_backends once.
dependency.reset()
# TODO(morganfainberg): Shouldn't need to clear the registry here, but
# some tests call load_backends multiple times. Since it is not
# possible to re-configure a backend, we need to clear the list. This
# should eventually be removed once testing has been cleaned up.
kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
self.clear_auth_plugin_registry()
drivers, _unused = common.setup_backends(
load_extra_backends_fn=self.load_extra_backends)
for manager_name, manager in drivers.items():
setattr(self, manager_name, manager)
self.addCleanup(self.cleanup_instance(*list(drivers.keys())))
def load_extra_backends(self):
"""Override to load managers that aren't loaded by default.
This is useful to load managers initialized by extensions. No extra
backends are loaded by default.
:return: dict of name -> manager
"""
return {}
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if (hasattr(self, 'identity_api') and
hasattr(self, 'assignment_api') and
hasattr(self, 'resource_api')):
for domain in fixtures.DOMAINS:
try:
rv = self.resource_api.create_domain(domain['id'], domain)
except exception.Conflict:
rv = self.resource_api.get_domain(domain['id'])
except exception.NotImplemented:
rv = domain
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for tenant in fixtures.TENANTS:
if hasattr(self, 'tenant_%s' % tenant['id']):
try:
# This will clear out any roles on the project as well
self.resource_api.delete_project(tenant['id'])
except exception.ProjectNotFound:
pass
rv = self.resource_api.create_project(
tenant['id'], tenant)
attrname = 'tenant_%s' % tenant['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for role in fixtures.ROLES:
try:
rv = self.role_api.create_role(role['id'], role)
except exception.Conflict:
rv = self.role_api.get_role(role['id'])
attrname = 'role_%s' % role['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
tenants = user_copy.pop('tenants')
try:
existing_user = getattr(self, 'user_%s' % user['id'], None)
if existing_user is not None:
self.identity_api.delete_user(existing_user['id'])
except exception.UserNotFound:
pass
# For users, the manager layer will generate the ID
user_copy = self.identity_api.create_user(user_copy)
# Our tests expect that the password is still in the user
# record so that they can reference it, so put it back into
# the dict returned.
user_copy['password'] = user['password']
for tenant_id in tenants:
try:
self.assignment_api.add_user_to_project(
tenant_id, user_copy['id'])
except exception.Conflict:
pass
# Use the ID from the fixture as the attribute name, so
# that our tests can easily reference each user dict, while
# the ID in the dict will be the real public ID.
attrname = 'user_%s' % user['id']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def _paste_config(self, config):
if not config.startswith('config:'):
test_path = os.path.join(TESTSDIR, config)
etc_path = os.path.join(ROOTDIR, 'etc', config)
for path in [test_path, etc_path]:
if os.path.exists('%s-paste.ini' % path):
return 'config:%s-paste.ini' % path
return config
def loadapp(self, config, name='main'):
return service.loadapp(self._paste_config(config), name=name)
def clear_auth_plugin_registry(self):
auth.controllers.AUTH_METHODS.clear()
auth.controllers.AUTH_PLUGINS_LOADED = False
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Asserts that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertTrue(abs(a - b).total_seconds() <= delta, msg)
def assertNotEmpty(self, l):
self.assertTrue(len(l))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp."""
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if isinstance(exc_value.args[0], unicode):
if not expected_regexp.search(unicode(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, unicode(exc_value)))
else:
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
self.config_fixture.config(group='revoke', driver='sql')
self.config_fixture.config(group='token', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
|
|
import re
from datetime import datetime, timedelta
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from amo.tests import formset, initial
from addons.models import Addon
from applications.models import Application, AppVersion
from devhub.models import ActivityLog
from files.models import File, Platform
from users.models import UserProfile
from versions.models import ApplicationsVersions, Version
class TestVersion(amo.tests.TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
assert self.client.login(username='del@icio.us', password='password')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = self.get_addon()
self.version = Version.objects.get(id=81551)
self.url = self.addon.get_dev_url('versions')
self.disable_url = self.addon.get_dev_url('disable')
self.enable_url = self.addon.get_dev_url('enable')
self.delete_url = reverse('devhub.versions.delete', args=['a3615'])
self.delete_data = {'addon_id': self.addon.pk,
'version_id': self.version.pk}
def get_addon(self):
return Addon.objects.get(id=3615)
def get_doc(self):
res = self.client.get(self.url)
eq_(res.status_code, 200)
return pq(res.content)
def test_version_status_public(self):
doc = self.get_doc()
assert doc('#version-status')
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=True)
doc = self.get_doc()
assert doc('#version-status .status-admin-disabled')
eq_(doc('#version-status strong').text(),
'This add-on has been disabled by Mozilla .')
self.addon.update(disabled_by_user=False)
doc = self.get_doc()
eq_(doc('#version-status strong').text(),
'This add-on has been disabled by Mozilla .')
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True)
doc = self.get_doc()
eq_(doc('#version-status strong').text(),
'You have disabled this add-on.')
def test_no_validation_results(self):
doc = self.get_doc()
v = doc('td.file-validation').text()
eq_(re.sub(r'\s+', ' ', v),
'All Platforms Not validated. Validate now.')
eq_(doc('td.file-validation a').attr('href'),
reverse('devhub.file_validation',
args=[self.addon.slug, self.version.all_files[0].id]))
def test_delete_message(self):
"""Make sure we warn our users of the pain they will feel."""
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#modal-delete p').eq(0).text(),
'Deleting your add-on will permanently remove it from the site '
'and prevent its GUID from being submitted ever again, even by '
'you. The existing users of your add-on will remain on this '
'update channel and never receive updates again.')
def test_delete_message_if_bits_are_messy(self):
"""Make sure we warn krupas of the pain they will feel."""
self.addon.highest_status = amo.STATUS_NULL
self.addon.status = amo.STATUS_UNREVIEWED
self.addon.save()
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#modal-delete p').eq(0).text(),
'Deleting your add-on will permanently remove it from the site '
'and prevent its GUID from being submitted ever again, even by '
'you. The existing users of your add-on will remain on this '
'update channel and never receive updates again.')
def test_delete_message_incomplete(self):
"""
If an addon has highest_status = 0, they shouldn't be bothered with a
blacklisting threat if they hit delete.
"""
self.addon.highest_status = amo.STATUS_NULL
self.addon.status = amo.STATUS_NULL
self.addon.save()
r = self.client.get(self.url)
doc = pq(r.content)
# Normally 2 paragraphs, one is the warning which we should take out.
eq_(doc('#modal-delete p.warning').length, 0)
def test_delete_version(self):
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
eq_(ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id)
.count(), 1)
def test_delete_version_then_detail(self):
version, file = self._extra_version_and_file(amo.STATUS_LITE)
self.client.post(self.delete_url, self.delete_data)
res = self.client.get(reverse('addons.detail', args=[self.addon.slug]))
eq_(res.status_code, 200)
def test_cant_delete_version(self):
self.client.logout()
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
assert Version.objects.filter(pk=81551).exists()
def test_version_delete_status_null(self):
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 0)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_NULL)
def _extra_version_and_file(self, status):
version = Version.objects.get(id=81551)
version_two = Version(addon=self.addon,
license=version.license,
version='1.2.3')
version_two.save()
file_two = File(status=status, version=version_two)
file_two.save()
return version_two, file_two
def test_version_delete_status(self):
self._extra_version_and_file(amo.STATUS_PUBLIC)
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 1)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_PUBLIC)
    def test_version_delete_status_unreviewed(self):
self._extra_version_and_file(amo.STATUS_BETA)
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 1)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_UNREVIEWED)
@mock.patch('files.models.File.hide_disabled_file')
def test_user_can_disable_addon(self, hide_mock):
self.addon.update(status=amo.STATUS_PUBLIC,
disabled_by_user=False)
res = self.client.post(self.disable_url)
eq_(res.status_code, 302)
addon = Addon.objects.get(id=3615)
eq_(addon.disabled_by_user, True)
eq_(addon.status, amo.STATUS_PUBLIC)
assert hide_mock.called
entry = ActivityLog.objects.get()
eq_(entry.action, amo.LOG.USER_DISABLE.id)
msg = entry.to_string()
assert self.addon.name.__unicode__() in msg, ("Unexpected: %r" % msg)
def test_user_get(self):
eq_(self.client.get(self.enable_url).status_code, 405)
def test_user_can_enable_addon(self):
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True)
res = self.client.post(self.enable_url)
self.assertRedirects(res, self.url, 302)
addon = self.get_addon()
eq_(addon.disabled_by_user, False)
eq_(addon.status, amo.STATUS_PUBLIC)
entry = ActivityLog.objects.get()
eq_(entry.action, amo.LOG.USER_ENABLE.id)
msg = entry.to_string()
assert unicode(self.addon.name) in msg, ("Unexpected: %r" % msg)
def test_unprivileged_user_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
res = self.client.post(self.disable_url)
eq_(res.status_code, 302)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_non_owner_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(username='regular@mozilla.com',
password='password')
res = self.client.post(self.disable_url)
eq_(res.status_code, 403)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_non_owner_cant_enable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(username='regular@mozilla.com',
password='password')
res = self.client.get(self.enable_url)
eq_(res.status_code, 403)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_show_disable_button(self):
self.addon.update(disabled_by_user=False)
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('#modal-disable')
assert doc('#disable-addon')
assert not doc('#enable-addon')
def test_not_show_disable(self):
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=False)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('#modal-disable')
assert not doc('#disable-addon')
def test_show_enable_button(self):
self.addon.update(disabled_by_user=True)
res = self.client.get(self.url)
doc = pq(res.content)
a = doc('#enable-addon')
assert a, "Expected Enable addon link"
eq_(a.attr('href'), self.enable_url)
assert not doc('#modal-disable')
assert not doc('#disable-addon')
def test_cancel_get(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
eq_(self.client.get(cancel_url).status_code, 405)
def test_cancel_wrong_status(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
for status in amo.STATUS_CHOICES:
if status in amo.STATUS_UNDER_REVIEW + (amo.STATUS_DELETED,):
continue
self.addon.update(status=status)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, status)
def test_cancel(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_LITE)
for status in (amo.STATUS_UNREVIEWED, amo.STATUS_NOMINATED):
self.addon.update(status=status)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_NULL)
def test_not_cancel(self):
self.client.logout()
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
eq_(self.addon.status, amo.STATUS_PUBLIC)
res = self.client.post(cancel_url)
eq_(res.status_code, 302)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_PUBLIC)
def test_cancel_button(self):
for status in amo.STATUS_CHOICES:
if status not in amo.STATUS_UNDER_REVIEW:
continue
self.addon.update(status=status)
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('#cancel-review')
assert doc('#modal-cancel')
def test_not_cancel_button(self):
for status in amo.STATUS_CHOICES:
if status in amo.STATUS_UNDER_REVIEW:
continue
self.addon.update(status=status)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('#cancel-review')
assert not doc('#modal-cancel')
def test_purgatory_request_review(self):
self.addon.update(status=amo.STATUS_PURGATORY)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, 'Request Preliminary Review Request Full Review')
def test_incomplete_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, 'Request Preliminary Review Request Full Review')
def test_rejected_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
self.addon.latest_version.files.update(status=amo.STATUS_DISABLED)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, None)
def test_days_until_full_nomination(self):
f = File.objects.create(status=amo.STATUS_LITE, version=self.version)
f.update(datestatuschanged=datetime.now() - timedelta(days=4))
self.addon.update(status=amo.STATUS_LITE)
doc = pq(self.client.get(self.url).content)
eq_(doc('.version-status-actions .warning').text(),
'Full nomination will be available in 6 days')
def test_add_version_modal(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
# Make sure checkboxes are visible:
eq_(doc('.desktop-platforms input.platform').length, 4)
eq_(doc('.mobile-platforms input.platform').length, 3)
eq_(set([i.attrib['type'] for i in doc('input.platform')]),
set(['checkbox']))
class TestVersionEdit(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/addon_3615',
'base/thunderbird', 'base/platforms']
def setUp(self):
assert self.client.login(username='del@icio.us', password='password')
self.addon = self.get_addon()
self.version = self.get_version()
self.url = reverse('devhub.versions.edit',
args=['a3615', self.version.id])
self.v1 = AppVersion(application_id=amo.FIREFOX.id, version='1.0')
self.v4 = AppVersion(application_id=amo.FIREFOX.id, version='4.0')
for v in self.v1, self.v4:
v.save()
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def get_version(self):
return self.get_addon().current_version
def formset(self, *args, **kw):
defaults = {'approvalnotes': 'xxx'}
defaults.update(kw)
return formset(*args, **defaults)
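    # A note on the `formset` helper from amo.tests: it merges each form's
    # fields into one POST dict together with Django's management-form keys
    # for the given prefix. A minimal sketch of the shape it produces
    # (illustrative keys/values, assumed rather than copied from the helper):
    #
    #     formset({'min': 1, 'max': 2}, prefix='compat', initial_count=1)
    #     # -> {'compat-TOTAL_FORMS': 1, 'compat-INITIAL_FORMS': 1,
    #     #     'compat-0-min': 1, 'compat-0-max': 2}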
class TestVersionEditMobile(TestVersionEdit):
def setUp(self):
super(TestVersionEditMobile, self).setUp()
self.version.apps.all().delete()
mobile = Application.objects.get(id=amo.MOBILE.id)
app_vr = AppVersion.objects.create(application=mobile, version='1.0')
ApplicationsVersions.objects.create(version=self.version,
application=mobile,
min=app_vr, max=app_vr)
self.version.files.update(platform=amo.PLATFORM_ANDROID.id)
def test_mobile_platform_options(self):
ctx = self.client.get(self.url).context
fld = ctx['file_form'].forms[0]['platform'].field
# TODO(Kumar) allow PLATFORM_ALL_MOBILE here when it is supported.
# See bug 646268.
eq_(sorted(amo.PLATFORMS[p[0]].shortname for p in fld.choices),
['android', 'maemo'])
class TestVersionEditDetails(TestVersionEdit):
def setUp(self):
super(TestVersionEditDetails, self).setUp()
ctx = self.client.get(self.url).context
compat = initial(ctx['compat_form'].forms[0])
files = initial(ctx['file_form'].forms[0])
self.initial = formset(compat, **formset(files, prefix='files'))
def formset(self, *args, **kw):
defaults = dict(self.initial)
defaults.update(kw)
return super(TestVersionEditDetails, self).formset(*args, **defaults)
def test_edit_notes(self):
d = self.formset(releasenotes='xx', approvalnotes='yy')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
version = self.get_version()
eq_(unicode(version.releasenotes), 'xx')
eq_(unicode(version.approvalnotes), 'yy')
def test_version_number_redirect(self):
url = self.url.replace(str(self.version.id), self.version.version)
r = self.client.get(url, follow=True)
self.assertRedirects(r, self.url)
def test_supported_platforms(self):
res = self.client.get(self.url)
choices = res.context['new_file_form'].fields['platform'].choices
taken = [f.platform_id for f in self.version.files.all()]
platforms = set(self.version.compatible_platforms()) - set(taken)
eq_(len(choices), len(platforms))
def test_can_upload(self):
self.version.files.all().delete()
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('a.add-file')
def test_not_upload(self):
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('a.add-file')
def test_add(self):
res = self.client.get(self.url)
doc = pq(res.content)
assert res.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app'
def test_add_not(self):
Application(id=52).save()
for id in [18, 52, 59, 60, 61]:
av = AppVersion(application_id=id, version='1')
av.save()
ApplicationsVersions(application_id=id, min=av, max=av,
version=self.version).save()
res = self.client.get(self.url)
doc = pq(res.content)
assert not res.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app hide'
class TestVersionEditSearchEngine(TestVersionEdit):
# https://bugzilla.mozilla.org/show_bug.cgi?id=605941
fixtures = ['base/apps', 'base/users',
'base/thunderbird', 'base/addon_4594_a9.json',
'base/platforms']
def setUp(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
self.url = reverse('devhub.versions.edit',
args=['a4594', 42352])
def test_search_engine_edit(self):
dd = self.formset(prefix="files", releasenotes='xx',
approvalnotes='yy')
r = self.client.post(self.url, dd)
eq_(r.status_code, 302)
version = Addon.objects.no_cache().get(id=4594).current_version
eq_(unicode(version.releasenotes), 'xx')
eq_(unicode(version.approvalnotes), 'yy')
def test_no_compat(self):
r = self.client.get(self.url)
doc = pq(r.content)
assert not doc("#id_form-TOTAL_FORMS")
def test_no_upload(self):
r = self.client.get(self.url)
doc = pq(r.content)
assert not doc('a.add-file')
@mock.patch('versions.models.Version.is_allowed_upload')
def test_can_upload(self, allowed):
allowed.return_value = True
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('a.add-file')
class TestVersionEditFiles(TestVersionEdit):
def setUp(self):
super(TestVersionEditFiles, self).setUp()
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
self.compat = initial(f)
def formset(self, *args, **kw):
compat = formset(self.compat, initial_count=1)
compat.update(kw)
return super(TestVersionEditFiles, self).formset(*args, **compat)
def test_delete_file(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
eq_(self.version.files.count(), 1)
forms = map(initial,
self.client.get(self.url).context['file_form'].forms)
forms[0]['DELETE'] = True
eq_(ActivityLog.objects.count(), 0)
r = self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(ActivityLog.objects.count(), 2)
log = ActivityLog.objects.order_by('created')[1]
eq_(log.to_string(), u'File delicious_bookmarks-2.1.072-fx.xpi deleted'
' from <a href="/en-US/firefox/addon/a3615'
'/versions/2.1.072">Version 2.1.072</a> of <a '
'href="/en-US/firefox/addon/a3615/">Delicious '
'Bookmarks</a>.')
eq_(r.status_code, 302)
eq_(self.version.files.count(), 0)
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_unique_platforms(self):
# Move the existing file to Linux.
f = self.version.files.get()
f.update(platform=Platform.objects.get(id=amo.PLATFORM_LINUX.id))
# And make a new file for Mac.
File.objects.create(version=self.version,
platform_id=amo.PLATFORM_MAC.id)
forms = map(initial,
self.client.get(self.url).context['file_form'].forms)
forms[1]['platform'] = forms[0]['platform']
r = self.client.post(self.url, self.formset(*forms, prefix='files'))
doc = pq(r.content)
assert doc('#id_files-0-platform')
eq_(r.status_code, 200)
eq_(r.context['file_form'].non_form_errors(),
['A platform can only be chosen once.'])
def test_all_platforms(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
File.objects.create(version=self.version,
platform_id=amo.PLATFORM_MAC.id)
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
res = self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(res.context['file_form'].non_form_errors()[0],
'The platform All cannot be combined with specific platforms.')
def test_all_platforms_and_delete(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
File.objects.create(version=self.version,
platform_id=amo.PLATFORM_MAC.id)
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
        # Check that the platform is not validated for deleted files.
forms[1]['DELETE'] = 1
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(self.version.files.count(), 1)
def add_in_bsd(self):
f = self.version.files.get()
# The default file is All, which prevents the addition of more files.
f.update(platform=Platform.objects.get(id=amo.PLATFORM_MAC.id))
return File.objects.create(version=self.version,
platform_id=amo.PLATFORM_BSD.id)
def get_platforms(self, form):
return [amo.PLATFORMS[i[0]].shortname
for i in form.fields['platform'].choices]
    # The unsupported platform tests are for legacy add-ons. We don't want new
    # add-ons uploaded with unsupported platforms, but the old files can still
    # be edited.
def test_all_unsupported_platforms(self):
self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[1])
assert 'bsd' in choices, (
'After adding a BSD file, expected its platform to be '
'available in: %r' % choices)
def test_all_unsupported_platforms_unchange(self):
bsd = self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(File.objects.no_cache().get(pk=bsd.pk).platform_id,
amo.PLATFORM_BSD.id)
def test_all_unsupported_platforms_change(self):
bsd = self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
# Update the file platform to Linux:
forms[1]['platform'] = amo.PLATFORM_LINUX.id
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(File.objects.no_cache().get(pk=bsd.pk).platform_id,
amo.PLATFORM_LINUX.id)
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[1])
assert 'bsd' not in choices, (
'After changing BSD file to Linux, BSD should no longer be a '
'platform choice in: %r' % choices)
def test_add_file_modal(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
# Make sure radio buttons are visible:
eq_(doc('.platform ul label').text(), 'Linux Mac OS X Windows')
eq_(set([i.attrib['type'] for i in doc('input.platform')]),
set(['radio']))
def test_mobile_addon_supports_only_mobile_platforms(self):
app = Application.objects.get(pk=amo.MOBILE.id)
for a in self.version.apps.all():
a.application = app
a.save()
self.version.files.all().update(platform=amo.PLATFORM_ALL_MOBILE.id)
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[0])
eq_(sorted(choices),
sorted([p.shortname for p in amo.MOBILE_PLATFORMS.values()]))
class TestPlatformSearch(TestVersionEdit):
fixtures = ['base/apps', 'base/users',
'base/thunderbird', 'base/addon_4594_a9.json']
def setUp(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
self.url = reverse('devhub.versions.edit',
args=['a4594', 42352])
self.version = Version.objects.get(id=42352)
self.file = self.version.files.all()[0]
for platform in amo.PLATFORMS:
if platform != 0:
Platform.objects.get_or_create(id=platform)
def test_no_platform_search_engine(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#id_files-0-platform')
def test_changing_platform_search_engine(self):
dd = self.formset({'id': int(self.file.pk),
'platform': amo.PLATFORM_LINUX.id},
prefix='files', releasenotes='xx',
approvalnotes='yy')
response = self.client.post(self.url, dd)
eq_(response.status_code, 302)
version = Version.objects.no_cache().get(id=42352).files.all()[0]
eq_(amo.PLATFORM_ALL.id, version.platform.id)
class TestVersionEditCompat(TestVersionEdit):
def get_form(self, url=None):
if not url:
url = self.url
av = self.version.apps.get()
eq_(av.min.version, '2.0')
eq_(av.max.version, '4.0')
f = self.client.get(url).context['compat_form'].initial_forms[0]
return initial(f)
def formset(self, *args, **kw):
defaults = formset(prefix='files')
defaults.update(kw)
return super(TestVersionEditCompat, self).formset(*args, **defaults)
def test_add_appversion(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = self.formset(initial(f), dict(application=18, min=28, max=29),
initial_count=1)
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
apps = self.get_version().compatible_apps.keys()
eq_(sorted(apps), sorted([amo.FIREFOX, amo.THUNDERBIRD]))
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_update_appversion(self):
d = self.get_form()
d.update(min=self.v1.id, max=self.v4.id)
r = self.client.post(self.url,
self.formset(d, initial_count=1))
eq_(r.status_code, 302)
av = self.version.apps.get()
eq_(av.min.version, '1.0')
eq_(av.max.version, '4.0')
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_ajax_update_appversion(self):
url = reverse('devhub.ajax.compat.update',
args=['a3615', self.version.id])
d = self.get_form(url)
d.update(min=self.v1.id, max=self.v4.id)
r = self.client.post(url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
av = self.version.apps.get()
eq_(av.min.version, '1.0')
eq_(av.max.version, '4.0')
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_delete_appversion(self):
# Add thunderbird compat so we can delete firefox.
self.test_add_appversion()
f = self.client.get(self.url).context['compat_form']
d = map(initial, f.initial_forms)
d[0]['DELETE'] = True
r = self.client.post(self.url, self.formset(*d, initial_count=2))
eq_(r.status_code, 302)
apps = self.get_version().compatible_apps.keys()
eq_(apps, [amo.THUNDERBIRD])
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_unique_apps(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
dupe = initial(f)
del dupe['id']
d = self.formset(initial(f), dupe, initial_count=1)
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
        # Because of how formsets work, the second form is expected to be a
        # Thunderbird version range. The 200 response means the form was
        # re-rendered with errors, so the duplicate application was rejected.
def test_require_appversion(self):
old_av = self.version.apps.get()
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['DELETE'] = True
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
eq_(r.context['compat_form'].non_form_errors(),
['Need at least one compatible application.'])
eq_(self.version.apps.get(), old_av)
def test_proper_min_max(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['min'], d['max'] = d['max'], d['min']
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
eq_(r.context['compat_form'].forms[0].non_field_errors(),
['Invalid version range.'])
def test_same_min_max(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['min'] = d['max']
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 302)
av = self.version.apps.all()[0]
eq_(av.min, av.max)
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
# ----------------------------------------------------------------------------
"""Tests the pysat MetaLabels object."""
import datetime as dt
import logging
import numpy as np
import pytest
import pysat
class TestMetaLabels(object):
"""Unit and integration tests for the MetaLabels class."""
def setup(self):
"""Set up the unit test environment for each method."""
testInst = pysat.Instrument('pysat', 'testing')
self.meta_labels = testInst.meta.labels
self.meta = pysat.Meta()
return
def teardown(self):
"""Clean up the unit test environment after each method."""
del self.meta, self.meta_labels
return
# -----------------------
# Test the Error messages
def test_default_label_value_raises_error(self):
"""Test `MetaLabels.default_values_from_attr` errors with bad attr."""
with pytest.raises(ValueError) as verr:
self.meta_labels.default_values_from_attr('not_an_attr')
assert verr.match("unknown label attribute")
return
@pytest.mark.parametrize("iter_type", [list, dict, set, tuple, np.ndarray])
def test_set_bad_type(self, iter_type):
"""Test MetaLabels type evaluations does not allow iterables.
Parameters
----------
iter_type : type
Different iterable types
"""
with pytest.raises(TypeError) as terr:
pysat.MetaLabels(value_range=('val_range', iter_type))
assert str(terr).find("iterable types like") >= 0
return
@pytest.mark.parametrize("iter_type", [list, dict, set, tuple, np.ndarray])
def test_update_bad_type(self, iter_type):
"""Test MetaLabels type evaluations does not allow iterables.
Parameters
----------
iter_type : type
Different iterable types
"""
with pytest.raises(TypeError) as terr:
self.meta_labels.update("value_range", 'val_range', iter_type)
assert str(terr).find("iterable types like") >= 0
return
# -------------------------
# Test the logging messages
@pytest.mark.parametrize("in_val", [1., 1, {}, None, []])
def test_default_value_from_type_unexpected_input(self, in_val, caplog):
"""Test `MetaLabels.default_values_from_type` with unexpected input."""
with caplog.at_level(logging.INFO, logger='pysat'):
self.meta_labels.default_values_from_type(in_val)
# Test for expected string
captured = caplog.text
test_str = 'No type match found for '
assert captured.find(test_str) >= 0
return
# ---------------------------
# Test the class magic methods
def test_repr(self):
"""Test the `MetaLabels.__repr__` method."""
out = self.meta_labels.__repr__()
assert isinstance(out, str)
assert out.find('pysat.MetaLabels(') >= 0
return
# -----------------------------
# Test the class hidden methods
@pytest.mark.parametrize("val_type", [int, float, type(None), str, bytes,
bool, np.float32, np.float64,
np.int32, np.int64, np.datetime64,
dt.datetime, dt.timedelta])
def test_eval_label_type_true(self, val_type):
"""Test successful ID of an allowable meta data type.
Parameters
----------
val_type : type
Scalar data type
"""
assert self.meta_labels._eval_label_type(val_type)
return
@pytest.mark.parametrize("val_type", [list, dict, set, tuple, np.ndarray])
def test_eval_label_type_false(self, val_type):
"""Test successful ID of an allowable meta data type.
Parameters
----------
val_type : type
Iterable data type
"""
assert not self.meta_labels._eval_label_type(val_type)
return
# -----------------------------
# Test the class public methods
@pytest.mark.parametrize("in_val",
[float, np.float16, np.float32, np.float64])
def test_default_value_from_type_float_inputs(self, in_val):
"""Test `MetaLabels.default_values_from_type` with float inputs."""
out = self.meta.labels.default_values_from_type(in_val)
assert np.isnan(out)
return
@pytest.mark.parametrize("in_val, comp_val",
[(int, -1), (np.int8, -1), (np.int16, -1),
(np.int32, -1), (np.int64, -1), (str, '')])
def test_default_value_from_type_int_inputs(self, in_val, comp_val):
"""Test `MetaLabels.default_values_from_type` with int inputs."""
out = self.meta.labels.default_values_from_type(in_val)
assert out == comp_val
return
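    # Taken together, the two parametrized tests above pin down the default
    # values: float types map to NaN, int types to -1, and str to ''. (This
    # summary is derived from the cases exercised here, not an exhaustive
    # account of pysat's type handling.)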
def test_update(self):
"""Test successful update of MetaLabels."""
self.meta_labels.update('new_label', 'new_name', int)
assert hasattr(self.meta_labels, 'new_label')
assert self.meta_labels.new_label == 'new_name'
assert self.meta_labels.label_type['new_label'] == int
return
# ----------------------------------------
# Test the integration with the Meta class
def test_change_case_of_meta_labels(self):
"""Test changing case of meta labels after initialization."""
self.meta_labels = {'units': ('units', str), 'name': ('long_name', str)}
self.meta = pysat.Meta(labels=self.meta_labels)
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta.labels.units = 'Units'
self.meta.labels.name = 'Long_Name'
assert (self.meta['new'].Units == 'hey')
assert (self.meta['new'].Long_Name == 'boo')
assert (self.meta['new2'].Units == 'hey2')
assert (self.meta['new2'].Long_Name == 'boo2')
return
def test_case_change_of_meta_labels_w_ho(self):
"""Test change case of meta labels after initialization with HO data."""
# Set the initial labels
self.meta_labels = {'units': ('units', str), 'name': ('long_Name', str)}
self.meta = pysat.Meta(labels=self.meta_labels)
meta2 = pysat.Meta(labels=self.meta_labels)
# Set meta data values
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = meta2
# Change the label name
self.meta.labels.units = 'Units'
self.meta.labels.name = 'Long_Name'
# Evaluate the results in the main data
assert (self.meta['new'].Units == 'hey')
assert (self.meta['new'].Long_Name == 'boo')
# Evaluate the results in the higher order data
assert (self.meta['new2'].children['new21'].Units == 'hey2')
assert (self.meta['new2'].children['new21'].Long_Name == 'boo2')
return
from __future__ import unicode_literals
from celery import chain
from waldur_core.core import executors as core_executors
from waldur_core.core import tasks as core_tasks
from waldur_core.core import utils as core_utils
from waldur_core.structure import executors as structure_executors
from waldur_openstack.openstack import executors as openstack_executors
from . import tasks, models
class VolumeCreateExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
return chain(
tasks.ThrottleProvisionTask().si(
serialized_volume,
'create_volume',
state_transition='begin_creating'
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error',
).set(countdown=30)
)
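# Celery note for the executors in this module: `.si(...)` builds an
# *immutable* signature, so tasks in these chains do not receive the return
# value of their predecessor; `.s(...)` would pass results along. A minimal
# standalone sketch (hypothetical tasks `add` and `mul`):
#
#     from celery import chain
#     chain(add.si(2, 2), mul.si(8, 8)).apply_async()  # mul ignores add's result
#
# The `.set(countdown=30)` above simply delays the first poll, giving the
# backend time to report a stable volume state.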
class VolumeUpdateExecutor(core_executors.UpdateExecutor):
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
updated_fields = kwargs['updated_fields']
if 'name' in updated_fields or 'description' in updated_fields:
return core_tasks.BackendMethodTask().si(
serialized_volume, 'update_volume', state_transition='begin_updating')
else:
return core_tasks.StateTransitionTask().si(serialized_volume, state_transition='begin_updating')
class VolumeDeleteExecutor(core_executors.DeleteExecutor):
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
if volume.backend_id:
return chain(
core_tasks.BackendMethodTask().si(
serialized_volume, 'delete_volume', state_transition='begin_deleting'),
core_tasks.PollBackendCheckTask().si(serialized_volume, 'is_volume_deleted'),
)
else:
return core_tasks.StateTransitionTask().si(serialized_volume, state_transition='begin_deleting')
class VolumePullExecutor(core_executors.ActionExecutor):
action = 'Pull'
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_volume, 'pull_volume',
state_transition='begin_updating')
class VolumeExtendExecutor(core_executors.ActionExecutor):
action = 'Extend'
@classmethod
def get_action_details(cls, volume, **kwargs):
return {
'message': 'Extend volume from %s MB to %s MB' % (kwargs.get('old_size'), volume.size),
'old_size': kwargs.get('old_size'),
'new_size': volume.size,
}
@classmethod
def pre_apply(cls, volume, **kwargs):
super(VolumeExtendExecutor, cls).pre_apply(volume, **kwargs)
if volume.instance is not None:
volume.instance.action = 'Extend volume'
volume.instance.schedule_updating()
volume.instance.save()
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
if volume.instance is None:
return chain(
core_tasks.BackendMethodTask().si(
serialized_volume,
backend_method='extend_volume',
state_transition='begin_updating',
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error'
)
)
return chain(
core_tasks.StateTransitionTask().si(
core_utils.serialize_instance(volume.instance),
state_transition='begin_updating'
),
core_tasks.BackendMethodTask().si(
serialized_volume,
backend_method='detach_volume',
state_transition='begin_updating'
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error'
),
core_tasks.BackendMethodTask().si(
serialized_volume,
backend_method='extend_volume',
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error'
),
core_tasks.BackendMethodTask().si(
serialized_volume,
instance_uuid=volume.instance.uuid.hex,
device=volume.device,
backend_method='attach_volume',
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='in-use',
erred_state='error'
),
)
@classmethod
def get_success_signature(cls, volume, serialized_volume, **kwargs):
if volume.instance is None:
return super(VolumeExtendExecutor, cls).get_success_signature(volume, serialized_volume, **kwargs)
else:
instance = volume.instance
serialized_instance = core_utils.serialize_instance(instance)
return chain(
super(VolumeExtendExecutor, cls).get_success_signature(volume, serialized_volume, **kwargs),
super(VolumeExtendExecutor, cls).get_success_signature(instance, serialized_instance, **kwargs),
)
@classmethod
def get_failure_signature(cls, volume, serialized_volume, **kwargs):
return tasks.VolumeExtendErredTask().s(serialized_volume)
class VolumeAttachExecutor(core_executors.ActionExecutor):
action = 'Attach'
@classmethod
def get_action_details(cls, volume, **kwargs):
return {'message': 'Attach volume to instance %s' % volume.instance.name}
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_volume,
instance_uuid=volume.instance.uuid.hex,
device=volume.device,
backend_method='attach_volume',
state_transition='begin_updating'
),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='in-use',
erred_state='error',
),
# additional pull to populate field "device".
core_tasks.BackendMethodTask().si(serialized_volume, backend_method='pull_volume'),
)
class VolumeDetachExecutor(core_executors.ActionExecutor):
action = 'Detach'
@classmethod
def get_action_details(cls, volume, **kwargs):
return {'message': 'Detach volume from instance %s' % volume.instance.name}
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_volume, backend_method='detach_volume', state_transition='begin_updating'),
core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error',
)
)
class SnapshotCreateExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, snapshot, serialized_snapshot, **kwargs):
return chain(
tasks.ThrottleProvisionTask().si(
serialized_snapshot,
'create_snapshot',
state_transition='begin_creating'
),
core_tasks.PollRuntimeStateTask().si(
serialized_snapshot,
backend_pull_method='pull_snapshot_runtime_state',
success_state='available',
erred_state='error',
).set(countdown=10)
)
class SnapshotUpdateExecutor(core_executors.UpdateExecutor):
@classmethod
def get_task_signature(cls, snapshot, serialized_snapshot, **kwargs):
updated_fields = kwargs['updated_fields']
# TODO: call separate task on metadata update
if 'name' in updated_fields or 'description' in updated_fields:
return core_tasks.BackendMethodTask().si(
serialized_snapshot, 'update_snapshot', state_transition='begin_updating')
else:
return core_tasks.StateTransitionTask().si(serialized_snapshot, state_transition='begin_updating')
class SnapshotDeleteExecutor(core_executors.DeleteExecutor):
@classmethod
def get_task_signature(cls, snapshot, serialized_snapshot, **kwargs):
if snapshot.backend_id:
return chain(
core_tasks.BackendMethodTask().si(
serialized_snapshot, 'delete_snapshot', state_transition='begin_deleting'),
core_tasks.PollBackendCheckTask().si(serialized_snapshot, 'is_snapshot_deleted'),
)
else:
return core_tasks.StateTransitionTask().si(serialized_snapshot, state_transition='begin_deleting')
class SnapshotPullExecutor(core_executors.ActionExecutor):
action = 'Pull'
@classmethod
def get_task_signature(cls, snapshot, serialized_snapshot, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_snapshot, 'pull_snapshot',
state_transition='begin_updating')
class InstanceCreateExecutor(core_executors.CreateExecutor):
""" First - create instance volumes in parallel, after - create instance based on created volumes """
@classmethod
def get_task_signature(cls, instance, serialized_instance, ssh_key=None, flavor=None):
""" Create all instance volumes in parallel and wait for them to provision """
serialized_volumes = [core_utils.serialize_instance(volume) for volume in instance.volumes.all()]
_tasks = [tasks.ThrottleProvisionStateTask().si(serialized_instance, state_transition='begin_creating')]
# Create volumes
for serialized_volume in serialized_volumes:
_tasks.append(tasks.ThrottleProvisionTask().si(
serialized_volume, 'create_volume', state_transition='begin_creating'))
for index, serialized_volume in enumerate(serialized_volumes):
# Wait for volume creation
_tasks.append(core_tasks.PollRuntimeStateTask().si(
serialized_volume,
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error',
).set(countdown=30 if index == 0 else 0))
            # Pull volume to make sure that it is bootable
_tasks.append(core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'))
# Mark volume as OK
_tasks.append(core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_ok'))
# Create instance based on volumes
kwargs = {
'backend_flavor_id': flavor.backend_id,
}
if ssh_key is not None:
kwargs['public_key'] = ssh_key.public_key
# Wait 10 seconds after volume creation due to OpenStack restrictions.
_tasks.append(core_tasks.BackendMethodTask().si(
serialized_instance, 'create_instance', **kwargs).set(countdown=10))
# Wait for instance creation
_tasks.append(core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state=models.Instance.RuntimeStates.ACTIVE,
erred_state=models.Instance.RuntimeStates.ERROR,
))
# Update volumes runtime state and device name
for serialized_volume in serialized_volumes:
_tasks.append(core_tasks.BackendMethodTask().si(
serialized_volume,
backend_method='pull_volume',
update_fields=['runtime_state', 'device']
))
# Pull instance internal IPs
# pull_instance_internal_ips method cannot be used, because it requires backend_id to update
        # existing internal IPs. However, internal IPs of the created instance do not have backend_ids.
_tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_created_instance_internal_ips'))
# Pull instance security groups
_tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_security_groups'))
# Create non-existing floating IPs
for floating_ip in instance.floating_ips.filter(backend_id=''):
serialized_floating_ip = core_utils.serialize_instance(floating_ip)
_tasks.append(core_tasks.BackendMethodTask().si(serialized_floating_ip, 'create_floating_ip'))
# Push instance floating IPs
_tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'push_instance_floating_ips'))
# Wait for operation completion
for index, floating_ip in enumerate(instance.floating_ips):
_tasks.append(core_tasks.PollRuntimeStateTask().si(
core_utils.serialize_instance(floating_ip),
backend_pull_method='pull_floating_ip_runtime_state',
success_state='ACTIVE',
erred_state='ERRED',
).set(countdown=5 if not index else 0))
shared_tenant = instance.service_project_link.service.settings.scope
if shared_tenant:
serialized_executor = core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor)
serialized_tenant = core_utils.serialize_instance(shared_tenant)
_tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))
return chain(*_tasks)
@classmethod
def get_success_signature(cls, instance, serialized_instance, **kwargs):
return tasks.SetInstanceOKTask().si(serialized_instance)
@classmethod
def get_failure_signature(cls, instance, serialized_instance, **kwargs):
return tasks.SetInstanceErredTask().s(serialized_instance)
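# Note the asymmetry in the two signatures above: the success callback uses
# `.si(...)` while the failure callback uses `.s(...)`. Celery invokes error
# callbacks with extra arguments describing the failed task (its id, or
# request/exc/traceback depending on configuration), so the errback is left
# mutable to accept them.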
class InstanceUpdateExecutor(core_executors.UpdateExecutor):
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
updated_fields = kwargs['updated_fields']
if 'name' in updated_fields:
return core_tasks.BackendMethodTask().si(
serialized_instance, 'update_instance', state_transition='begin_updating')
else:
return core_tasks.StateTransitionTask().si(serialized_instance, state_transition='begin_updating')
class InstanceUpdateSecurityGroupsExecutor(core_executors.ActionExecutor):
action = 'Update security groups'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_instance,
backend_method='push_instance_security_groups',
state_transition='begin_updating',
)
class InstanceDeleteExecutor(core_executors.DeleteExecutor):
@classmethod
def get_task_signature(cls, instance, serialized_instance, force=False, **kwargs):
delete_volumes = kwargs.pop('delete_volumes', True)
release_floating_ips = kwargs.pop('release_floating_ips', True)
delete_instance = cls.get_delete_instance_tasks(instance, serialized_instance, release_floating_ips)
# Case 1. Instance does not exist at backend
if not instance.backend_id:
return core_tasks.StateTransitionTask().si(
serialized_instance,
state_transition='begin_deleting'
)
# Case 2. Instance exists at backend.
# Data volumes are deleted by OpenStack because delete_on_termination=True
elif delete_volumes:
return chain(delete_instance)
# Case 3. Instance exists at backend.
# Data volumes are detached and not deleted.
else:
detach_volumes = cls.get_detach_data_volumes_tasks(instance, serialized_instance)
return chain(detach_volumes + delete_instance)
@classmethod
def get_delete_instance_tasks(cls, instance, serialized_instance, release_floating_ips):
_tasks = [
core_tasks.BackendMethodTask().si(
serialized_instance,
backend_method='delete_instance',
state_transition='begin_deleting',
),
core_tasks.PollBackendCheckTask().si(
serialized_instance,
backend_check_method='is_instance_deleted',
),
]
if release_floating_ips:
for index, floating_ip in enumerate(instance.floating_ips):
_tasks.append(core_tasks.BackendMethodTask().si(
core_utils.serialize_instance(floating_ip),
'delete_floating_ip',
).set(countdown=5 if not index else 0))
else:
# pull related floating IPs state after instance deletion
for index, floating_ip in enumerate(instance.floating_ips):
_tasks.append(core_tasks.BackendMethodTask().si(
core_utils.serialize_instance(floating_ip),
'pull_floating_ip_runtime_state',
).set(countdown=5 if not index else 0))
shared_tenant = instance.service_project_link.service.settings.scope
if shared_tenant:
serialized_executor = core_utils.serialize_class(openstack_executors.TenantPullFloatingIPsExecutor)
serialized_tenant = core_utils.serialize_instance(shared_tenant)
_tasks.append(core_tasks.ExecutorTask().si(serialized_executor, serialized_tenant))
return _tasks
@classmethod
def get_detach_data_volumes_tasks(cls, instance, serialized_instance):
data_volumes = instance.volumes.all().filter(bootable=False)
detach_volumes = [
core_tasks.BackendMethodTask().si(
core_utils.serialize_instance(volume),
backend_method='detach_volume',
)
for volume in data_volumes
]
check_volumes = [
core_tasks.PollRuntimeStateTask().si(
core_utils.serialize_instance(volume),
backend_pull_method='pull_volume_runtime_state',
success_state='available',
erred_state='error'
)
for volume in data_volumes
]
return detach_volumes + check_volumes
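        # Detach requests for every data volume are emitted first, then each
        # volume is polled until it reports 'available'. The caller wraps the
        # combined list in one celery chain, so everything still runs
        # sequentially; a celery `group` would be needed for true parallelism,
        # which this executor does not use.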
class InstanceFlavorChangeExecutor(core_executors.ActionExecutor):
action = 'Change flavor'
@classmethod
def get_action_details(cls, instance, **kwargs):
old_flavor_name = kwargs.get('old_flavor_name')
new_flavor_name = kwargs.get('flavor').name
return {
'message': 'Change flavor from %s to %s' % (old_flavor_name, new_flavor_name),
'old_flavor_name': old_flavor_name,
'new_flavor_name': new_flavor_name,
}
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
flavor = kwargs.pop('flavor')
return chain(
core_tasks.BackendMethodTask().si(
serialized_instance,
backend_method='resize_instance',
state_transition='begin_updating',
flavor_id=flavor.backend_id
),
core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state='VERIFY_RESIZE',
erred_state='ERRED'
),
core_tasks.BackendMethodTask().si(
serialized_instance,
backend_method='confirm_instance_resize'
),
core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state='SHUTOFF',
erred_state='ERRED'
),
)
class InstancePullExecutor(core_executors.ActionExecutor):
action = 'Pull'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_instance, 'pull_instance', state_transition='begin_updating',
),
core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_security_groups'),
core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_internal_ips'),
core_tasks.BackendMethodTask().si(serialized_instance, 'pull_instance_floating_ips'),
)
class InstanceFloatingIPsUpdateExecutor(core_executors.ActionExecutor):
action = 'Update floating IPs'
@classmethod
def get_action_details(cls, instance, **kwargs):
attached = set(instance._new_floating_ips) - set(instance._old_floating_ips)
detached = set(instance._old_floating_ips) - set(instance._new_floating_ips)
messages = []
if attached:
messages.append('Attached floating IPs: %s.' % ', '.join(attached))
if detached:
messages.append('Detached floating IPs: %s.' % ', '.join(detached))
if not messages:
messages.append('Instance floating IPs have been updated.')
return {
'message': ' '.join(messages),
'attached': list(attached),
'detached': list(detached),
}
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
_tasks = [core_tasks.StateTransitionTask().si(serialized_instance, state_transition='begin_updating')]
        # Create non-existing floating IPs
for floating_ip in instance.floating_ips.filter(backend_id=''):
serialized_floating_ip = core_utils.serialize_instance(floating_ip)
_tasks.append(core_tasks.BackendMethodTask().si(serialized_floating_ip, 'create_floating_ip'))
# Push instance floating IPs
_tasks.append(core_tasks.BackendMethodTask().si(serialized_instance, 'push_instance_floating_ips'))
# Wait for operation completion
for index, floating_ip in enumerate(instance.floating_ips):
_tasks.append(core_tasks.PollRuntimeStateTask().si(
core_utils.serialize_instance(floating_ip),
backend_pull_method='pull_floating_ip_runtime_state',
success_state='ACTIVE',
erred_state='ERRED',
).set(countdown=5 if not index else 0))
# Pull floating IPs again to update state of disconnected IPs
_tasks.append(core_tasks.IndependentBackendMethodTask().si(serialized_instance, 'pull_floating_ips'))
return chain(*_tasks)
@classmethod
def get_success_signature(cls, instance, serialized_instance, **kwargs):
return tasks.SetInstanceOKTask().si(serialized_instance)
@classmethod
def get_failure_signature(cls, instance, serialized_instance, **kwargs):
return tasks.SetInstanceErredTask().s(serialized_instance)
class InstanceStopExecutor(core_executors.ActionExecutor):
action = 'Stop'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_instance, 'stop_instance', state_transition='begin_updating',
),
core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state='SHUTOFF',
erred_state='ERRED'
),
)
class InstanceStartExecutor(core_executors.ActionExecutor):
action = 'Start'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_instance, 'start_instance', state_transition='begin_updating',
),
core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state='ACTIVE',
erred_state='ERRED'
),
)
class InstanceRestartExecutor(core_executors.ActionExecutor):
action = 'Restart'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return chain(
core_tasks.BackendMethodTask().si(
serialized_instance, 'restart_instance', state_transition='begin_updating',
),
core_tasks.PollRuntimeStateTask().si(
serialized_instance,
backend_pull_method='pull_instance_runtime_state',
success_state='ACTIVE',
erred_state='ERRED'
),
)
class InstanceInternalIPsSetUpdateExecutor(core_executors.ActionExecutor):
action = 'Update internal IPs'
@classmethod
def get_task_signature(cls, instance, serialized_instance, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_instance, 'push_instance_internal_ips', state_transition='begin_updating',
)
class BackupCreateExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, backup, serialized_backup, **kwargs):
serialized_snapshots = [core_utils.serialize_instance(snapshot) for snapshot in backup.snapshots.all()]
_tasks = [core_tasks.StateTransitionTask().si(serialized_backup, state_transition='begin_creating')]
for serialized_snapshot in serialized_snapshots:
_tasks.append(tasks.ThrottleProvisionTask().si(
serialized_snapshot, 'create_snapshot', force=True, state_transition='begin_creating'))
for index, serialized_snapshot in enumerate(serialized_snapshots):
_tasks.append(core_tasks.PollRuntimeStateTask().si(
serialized_snapshot,
backend_pull_method='pull_snapshot_runtime_state',
success_state='available',
erred_state='error',
).set(countdown=10 if index == 0 else 0))
_tasks.append(core_tasks.StateTransitionTask().si(serialized_snapshot, state_transition='set_ok'))
return chain(*_tasks)
@classmethod
def get_failure_signature(cls, backup, serialized_backup, **kwargs):
return tasks.SetBackupErredTask().s(serialized_backup)
class BackupDeleteExecutor(core_executors.DeleteExecutor):
@classmethod
def pre_apply(cls, backup, **kwargs):
for snapshot in backup.snapshots.all():
snapshot.schedule_deleting()
snapshot.save(update_fields=['state'])
core_executors.DeleteExecutor.pre_apply(backup)
@classmethod
def get_task_signature(cls, backup, serialized_backup, force=False, **kwargs):
serialized_snapshots = [core_utils.serialize_instance(snapshot) for snapshot in backup.snapshots.all()]
_tasks = [core_tasks.StateTransitionTask().si(serialized_backup, state_transition='begin_deleting')]
for serialized_snapshot in serialized_snapshots:
_tasks.append(core_tasks.BackendMethodTask().si(
serialized_snapshot, 'delete_snapshot', state_transition='begin_deleting'))
for serialized_snapshot in serialized_snapshots:
_tasks.append(core_tasks.PollBackendCheckTask().si(serialized_snapshot, 'is_snapshot_deleted'))
_tasks.append(core_tasks.DeletionTask().si(serialized_snapshot))
return chain(*_tasks)
@classmethod
def get_failure_signature(cls, backup, serialized_backup, force=False, **kwargs):
if not force:
return tasks.SetBackupErredTask().s(serialized_backup)
else:
return tasks.ForceDeleteBackupTask().si(serialized_backup)
class SnapshotRestorationExecutor(core_executors.CreateExecutor):
""" Restores volume from snapshot instance """
@classmethod
def get_task_signature(cls, snapshot_restoration, serialized_snapshot_restoration, **kwargs):
serialized_volume = core_utils.serialize_instance(snapshot_restoration.volume)
_tasks = [
tasks.ThrottleProvisionTask().si(
serialized_volume, 'create_volume', state_transition='begin_creating'),
core_tasks.PollRuntimeStateTask().si(
serialized_volume, 'pull_volume_runtime_state', success_state='available', erred_state='error',
).set(countdown=30),
core_tasks.BackendMethodTask().si(serialized_volume, 'remove_bootable_flag'),
core_tasks.BackendMethodTask().si(serialized_volume, 'pull_volume'),
]
return chain(*_tasks)
@classmethod
def get_success_signature(cls, snapshot_restoration, serialized_snapshot_restoration, **kwargs):
serialized_volume = core_utils.serialize_instance(snapshot_restoration.volume)
return core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_ok')
@classmethod
def get_failure_signature(cls, snapshot_restoration, serialized_snapshot_restoration, **kwargs):
serialized_volume = core_utils.serialize_instance(snapshot_restoration.volume)
return core_tasks.StateTransitionTask().si(serialized_volume, state_transition='set_erred')
class OpenStackTenantCleanupExecutor(structure_executors.BaseCleanupExecutor):
related_executor = openstack_executors.OpenStackCleanupExecutor
pre_models = (
models.SnapshotSchedule,
models.BackupSchedule,
)
executors = (
(models.Snapshot, SnapshotDeleteExecutor),
(models.Backup, BackupDeleteExecutor),
(models.Instance, InstanceDeleteExecutor),
(models.Volume, VolumeDeleteExecutor),
)
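    # Ordering note (inferred from the attribute names rather than from the
    # base class): `pre_models` are plain database deletions performed first,
    # since schedules have no backend counterpart; the (model, executor) pairs
    # in `executors` then run in the order listed -- snapshots and backups
    # before instances, with volumes last because they may stay attached until
    # their instance is gone.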
"""The config flow tests for the forked_daapd media player platform."""
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.forked_daapd.const import (
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DOMAIN,
)
from homeassistant.config_entries import (
CONN_CLASS_LOCAL_PUSH,
SOURCE_USER,
SOURCE_ZEROCONF,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT
from tests.async_mock import patch
from tests.common import MockConfigEntry
SAMPLE_CONFIG = {
"websocket_port": 3688,
"version": "25.0",
"buildoptions": [
"ffmpeg",
"iTunes XML",
"Spotify",
"LastFM",
"MPD",
"Device verification",
"Websockets",
"ALSA",
],
}
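# SAMPLE_CONFIG approximates the JSON a forked-daapd server returns from its
# config endpoint (shape assumed here and trimmed to the fields these tests
# read). The "websocket_port" entry is the one the integration depends on; a
# server without websockets enabled is rejected, as
# test_config_flow_no_websocket exercises below.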
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create hass config_entry fixture."""
data = {
CONF_HOST: "192.168.1.1",
CONF_PORT: "2345",
CONF_PASSWORD: "",
}
return MockConfigEntry(
version=1,
domain=DOMAIN,
title="",
data=data,
options={},
system_options={},
source=SOURCE_USER,
connection_class=CONN_CLASS_LOCAL_PUSH,
entry_id=1,
)
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_config_flow(hass, config_entry):
"""Test that the user step works."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection"
) as mock_test_connection, patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
mock_test_connection.return_value = ["ok", "My Music on myhost"]
config_data = config_entry.data
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_data
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "My Music on myhost"
assert result["data"][CONF_HOST] == config_data[CONF_HOST]
assert result["data"][CONF_PORT] == config_data[CONF_PORT]
assert result["data"][CONF_PASSWORD] == config_data[CONF_PASSWORD]
# Also test that creating a new entry with the same host aborts
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_entry.data,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_updates_title(hass, config_entry):
"""Test that zeroconf updates title and aborts with same host."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "different host"}).add_to_hass(hass)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {"mtd-version": "27.0", "Machine Name": "zeroconf_test"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert config_entry.title == "zeroconf_test"
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
async def test_config_flow_no_websocket(hass, config_entry):
"""Test config flow setup without websocket enabled on server."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection"
) as mock_test_connection:
# test invalid config data
mock_test_connection.return_value = ["websocket_not_enabled"]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_config_flow_zeroconf_invalid(hass):
"""Test that an invalid zeroconf entry doesn't work."""
# test with no discovery properties
discovery_info = {"host": "127.0.0.1", "port": 23}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with forked-daapd version < 27
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "26.3", "Machine Name": "forked-daapd"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with verbose mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "0.2.4.1", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with svn mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "svn-1676", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
async def test_config_flow_zeroconf_valid(hass):
"""Test that a valid zeroconf entry works."""
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {
"mtd-version": "27.0",
"Machine Name": "zeroconf_test",
"Machine ID": "5E55EEFF",
},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_options_flow(hass, config_entry):
"""Test config flow options."""
with patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
config_entry.add_to_hass(hass)
await config_entry.async_setup(hass)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TTS_PAUSE_TIME: 0.05,
CONF_TTS_VOLUME: 0.8,
CONF_LIBRESPOT_JAVA_PORT: 0,
CONF_MAX_PLAYLISTS: 8,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
|
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
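# Hedged usage sketch for sparse_encode (values illustrative, not from the
# original module): with a trivial identity dictionary, OMP with one nonzero
# coefficient just copies the dominant entry per sample.
#
#     >>> import numpy as np
#     >>> dictionary = np.eye(3)                     # 3 unit-norm atoms
#     >>> X = np.array([[1.5, 0., 0.],
#     ...               [0., 0., -2.]])
#     >>> code = sparse_encode(X, dictionary, algorithm='omp',
#     ...                      n_nonzero_coefs=1)
#     >>> code.shape                                 # (n_samples, n_components)
#     (2, 3)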
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
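# The BLAS `ger` calls above implement a rank-1 residual update; a hedged
# numpy rendering of the same algebra for atom k (illustrative only, the
# real loop updates R in place through BLAS):
#
#     R += np.outer(dictionary[:, k], code[k, :])            # add atom k back
#     dictionary[:, k] = R.dot(code[k, :])                   # unnormalized new atom
#     dictionary[:, k] /= np.linalg.norm(dictionary[:, k])   # rescale to unit norm
#     R -= np.outer(dictionary[:, k], code[k, :])            # subtract updated atom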
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
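# Hedged usage sketch for dict_learning (shapes illustrative):
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(20, 8)
#     >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
#     ...                                          random_state=0)
#     >>> code.shape, dictionary.shape
#     ((20, 5), (5, 8))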
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
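# Hedged usage sketch for dict_learning_online (shapes illustrative): with
# return_code=False only the dictionary is returned.
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(20, 8)
#     >>> V = dict_learning_online(X, n_components=5, alpha=1, n_iter=10,
#     ...                          return_code=False, random_state=0)
#     >>> V.shape
#     (5, 8)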
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
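# The split_sign branch above doubles the code width by separating signs; a
# hedged one-liner of the same transform (illustrative values):
#
#     >>> import numpy as np
#     >>> code = np.array([[1., -2.]])
#     >>> np.hstack([np.maximum(code, 0), -np.minimum(code, 0)])
#     array([[ 1.,  0.,  0.,  2.]])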
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
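# Hedged usage sketch for SparseCoder with the 'threshold' transform (values
# illustrative): entries of dictionary * X' are shrunk toward zero by alpha.
#
#     >>> import numpy as np
#     >>> coder = SparseCoder(np.eye(3), transform_algorithm='threshold',
#     ...                     transform_alpha=0.5)
#     >>> coder.transform(np.array([[0.2, 1.0, -0.7]]))
#     array([[ 0. ,  0.5, -0.2]])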
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
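# Hedged usage sketch for DictionaryLearning (shapes illustrative):
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(20, 8)
#     >>> dl = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
#     ...                         random_state=0).fit(X)
#     >>> dl.components_.shape
#     (5, 8)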
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
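# Hedged usage sketch for online fitting via partial_fit (shapes
# illustrative): each call treats its argument as one mini-batch and carries
# the (A, B) inner statistics forward.
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(20, 8)
#     >>> mbdl = MiniBatchDictionaryLearning(n_components=5, n_iter=5,
#     ...                                    random_state=0)
#     >>> for batch in np.array_split(X, 4):
#     ...     mbdl = mbdl.partial_fit(batch)
#     >>> mbdl.components_.shape
#     (5, 8)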
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
from prompt_toolkit.filters import Filter, Always, IsDone, HasFocus, RendererHeightIsKnown
from prompt_toolkit.layout.containers import VSplit, HSplit, \
Window, FloatContainer, Float, ConditionalContainer
from prompt_toolkit.layout.controls import BufferControl, FillControl, TokenListControl
from prompt_toolkit.layout.dimension import LayoutDimension as D
from prompt_toolkit.layout.lexers import PygmentsLexer, Lexer as PromptLex
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.layout.processors import HighlightSearchProcessor, \
HighlightSelectionProcessor, \
ConditionalProcessor, AppendAutoSuggestion
from prompt_toolkit.layout.prompt import DefaultPrompt
from prompt_toolkit.layout.screen import Char
from pygments.token import Token
from pygments.lexer import Lexer as PygLex
import azclishell.configuration
from azclishell.key_bindings import get_show_default, get_symbols
from azclishell.progress import get_progress_message, get_done
MAX_COMPLETION = 16
DEFAULT_COMMAND = ""
# pylint: disable=too-few-public-methods
class HasDefaultScope(Filter):
""" if there is a scope on the input """
def __call__(self, *a, **kw):
return DEFAULT_COMMAND == ""
# TODO fix this somehow
input_processors = [
ConditionalProcessor(
# By default, only highlight search when the search
# input has the focus. (Note that this doesn't mean
# there is no search: the Vi 'n' binding for instance
# still allows to jump to the next match in
# navigation mode.)
HighlightSearchProcessor(preview_search=Always()),
HasFocus(SEARCH_BUFFER)),
HighlightSelectionProcessor(),
ConditionalProcessor(
AppendAutoSuggestion(), HasFocus(DEFAULT_BUFFER) & HasDefaultScope()),
]
# pylint: disable=too-few-public-methods
class ShowDefault(Filter):
""" toggle on and off seeing the default """
def __call__(self, *a, **kw):
return get_show_default()
# pylint: disable=too-few-public-methods
class ShowSymbol(Filter):
""" toggle showing the symbols """
def __call__(self, *a, **kw):
return get_symbols()
# pylint: disable=too-few-public-methods
class ShowProgress(Filter):
""" toggle showing the progress """
def __call__(self, *a, **kw):
progress = get_progress_message()
done = get_done()
return progress != '' and not done
def get_scope():
"""" returns the default command """
return DEFAULT_COMMAND
def set_scope(com, add=True):
""" sets the scope """
global DEFAULT_COMMAND
if add:
DEFAULT_COMMAND += " " + com
else:
DEFAULT_COMMAND = com
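# Hedged behavior sketch (illustrative): scopes accumulate with add=True and
# feed straight into the prompt.
#
#     >>> set_scope('vm'); set_scope('image')
#     >>> get_scope()
#     ' vm image'
#     >>> get_prompt_tokens(None)[0][1]
#     'az vm image>> '
#     >>> set_scope('', add=False)               # reset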
def get_prompt_tokens(cli):
""" returns prompt tokens """
return [(Token.Az, 'az%s>> ' % DEFAULT_COMMAND)]
def get_height(cli):
""" gets the height of the cli """
if not cli.is_done:
return D(min=8)
def get_tutorial_tokens(cli):
""" tutorial tokens """
return [(Token.Toolbar, 'In Tutorial Mode: Press [Enter] after typing each part')]
def get_lexers(main_lex, exam_lex, tool_lex):
""" gets all the lexer wrappers """
if not main_lex:
return None, None, None
lexer = None
if main_lex:
if issubclass(main_lex, PromptLex):
lexer = main_lex
elif issubclass(main_lex, PygLex):
lexer = PygmentsLexer(main_lex)
if exam_lex:
if issubclass(exam_lex, PygLex):
exam_lex = PygmentsLexer(exam_lex)
if tool_lex:
if issubclass(tool_lex, PygLex):
tool_lex = PygmentsLexer(tool_lex)
return lexer, exam_lex, tool_lex
def create_tutorial_layout(lex):
""" layout for example tutorial """
lexer, _, _ = get_lexers(lex, None, None)
layout_full = HSplit([
FloatContainer(
Window(
BufferControl(
input_processors=input_processors,
lexer=lexer,
preview_search=Always()),
get_height=get_height),
[
Float(xcursor=True,
ycursor=True,
content=CompletionsMenu(
max_height=MAX_COMPLETION,
scroll_offset=1,
extra_filter=(HasFocus(DEFAULT_BUFFER))))]),
ConditionalContainer(
HSplit([
get_hline(),
get_param(lexer),
get_hline(),
Window(
content=BufferControl(
buffer_name='example_line',
lexer=lexer
),
),
Window(
TokenListControl(
get_tutorial_tokens,
default_char=Char(' ', Token.Toolbar)),
height=D.exact(1)),
]),
filter=~IsDone() & RendererHeightIsKnown()
)
])
return layout_full
def create_layout(lex, exam_lex, toolbar_lex):
""" creates the layout """
config = azclishell.configuration.CONFIGURATION
lexer, exam_lex, toolbar_lex = get_lexers(lex, exam_lex, toolbar_lex)
if not any(isinstance(processor, DefaultPrompt) for processor in input_processors):
input_processors.append(DefaultPrompt(get_prompt_tokens))
layout_lower = ConditionalContainer(
HSplit([
get_anyhline(config),
get_descriptions(config, exam_lex, lexer),
get_examplehline(config),
get_example(config, exam_lex),
ConditionalContainer(
get_hline(),
filter=ShowDefault() | ShowSymbol()
),
ConditionalContainer(
Window(
content=BufferControl(
buffer_name='default_values',
lexer=lexer
)
),
filter=ShowDefault()
),
ConditionalContainer(
get_hline(),
filter=ShowDefault() & ShowSymbol()
),
ConditionalContainer(
Window(
content=BufferControl(
buffer_name='symbols',
lexer=exam_lex
)
),
filter=ShowSymbol()
),
ConditionalContainer(
Window(
content=BufferControl(
buffer_name='progress',
lexer=lexer
)
),
filter=ShowProgress()
),
Window(
content=BufferControl(
buffer_name='bottom_toolbar',
lexer=toolbar_lex
),
),
]),
filter=~IsDone() & RendererHeightIsKnown()
)
layout_full = HSplit([
FloatContainer(
Window(
BufferControl(
input_processors=input_processors,
lexer=lexer,
preview_search=Always()),
get_height=get_height,
),
[
Float(xcursor=True,
ycursor=True,
content=CompletionsMenu(
max_height=MAX_COMPLETION,
scroll_offset=1,
extra_filter=(HasFocus(DEFAULT_BUFFER))))]),
layout_lower
])
return layout_full
def get_anyhline(config):
""" if there is a line between descriptions and example """
if config.BOOLEAN_STATES[config.config.get('Layout', 'command_description')] or\
config.BOOLEAN_STATES[config.config.get('Layout', 'param_description')]:
return Window(
width=D.exact(1),
height=D.exact(1),
content=FillControl('-', token=Token.Line))
else:
return get_empty()
def get_descript(lexer):
""" command description window """
return Window(
content=BufferControl(
buffer_name="description",
lexer=lexer))
def get_param(lexer):
""" parameter description window """
return Window(
content=BufferControl(
buffer_name="parameter",
lexer=lexer))
def get_example(config, exam_lex):
""" example description window """
if config.BOOLEAN_STATES[config.config.get('Layout', 'examples')]:
return Window(
content=BufferControl(
buffer_name="examples",
lexer=exam_lex))
else:
return get_empty()
def get_examplehline(config):
""" gets a line if there are examples """
if config.BOOLEAN_STATES[config.config.get('Layout', 'examples')]:
return get_hline()
else:
return get_empty()
def get_empty():
""" returns an empty window because of syntaxical issues """
return Window(
content=FillControl(' ')
)
def get_hline():
""" gets a horiztonal line """
return Window(
width=D.exact(1),
height=D.exact(1),
content=FillControl('-', token=Token.Line))
def get_vline():
""" gets a vertical line """
return Window(
width=D.exact(1),
height=D.exact(1),
content=FillControl('*', token=Token.Line))
def get_descriptions(config, exam_lex, lexer):
""" based on the configuration settings determines which windows to include """
if config.BOOLEAN_STATES[config.config.get('Layout', 'command_description')]:
if config.BOOLEAN_STATES[config.config.get('Layout', 'param_description')]:
return VSplit([
get_descript(exam_lex),
get_vline(),
get_param(lexer),
])
else:
return get_descript(exam_lex)
elif config.BOOLEAN_STATES[config.config.get('Layout', 'param_description')]:
return get_param(lexer)
else:
return get_empty()
|
|
import unittest, random, sys, time, re, getpass
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util
import h2o_print as h2p, h2o_gbm
from h2o_xl import Key, Fcn, Assign, Cbind
# FIX! This has some abbreviated stuff from h2o...look back there for completeness, eventually
DO_QUANTILE = False
# details:
# we want to seed a random dictionary for our enums
# string.ascii_uppercase string.printable string.letters string.digits string.punctuation string.whitespace
# restricting the choices makes it easier to find the bad cases
randChars = "abeE01" + "$%+-.;|\t "
randChars = "abeE01" # bad..causes NAification. probably 1E0e is causing a problem
# randChars = "abfF01" # try this.. fails
# randChars = "abcdef" #
quoteChars = "\'\""
# don't use any quote characters. We'd have to protect combinations
quoteChars = ""
MIN_ENUM_WIDTH = 2
MAX_ENUM_WIDTH = 8
RAND_ENUM_LENGTH = True
# CUT_EXPR_CNT = 200
CUT_EXPR_CNT = 20
# ROWS=1000000
ROWS=10000
# ROWS=100
DO_PLOT = getpass.getuser()=='kevin'
DO_MEDIAN = True
MAX_QBINS = 1000
MULTI_PASS = 1
def random_enum(n, randChars=randChars, quoteChars=quoteChars):
# randomly return None 10% of the time
# if random.randint(0,9)==0:
# return 'huh' # empty string doesn't work for exec compare?
choiceStr = randChars + quoteChars
mightBeNumberOrWhite = True
while mightBeNumberOrWhite:
# H2O doesn't seem to tolerate random single or double quote in the first two rows.
# disallow that by not passing quoteChars for the first two rows (in call to here)
r = ''.join(random.choice(choiceStr) for x in range(n))
mightBeNumberOrWhite = h2o_util.might_h2o_think_number_or_whitespace(r)
return r
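# Hedged note: the loop above re-rolls candidates that h2o_util flags as
# number-or-whitespace-like, so with randChars="abeE01" a draw such as "1e01"
# (a valid float literal) is rejected while "ab0E" would be kept.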
def create_enum_list(n=4, **kwargs):
# Allowing length one, we sometimes form single digit numbers that cause the whole column to NA
# see DparseTask.java for this effect
# FIX! if we allow 0, then we allow NA?. I guess we check for no missing, so can't allow NA
# too many retries allowing 1. try 2 min.
if RAND_ENUM_LENGTH:
enumList = [random_enum(n=random.randint(MIN_ENUM_WIDTH, MAX_ENUM_WIDTH), **kwargs) for i in range(n)]
else:
# a fixed width is sometimes good for finding badness
enumList = [random_enum(n=MAX_ENUM_WIDTH, **kwargs) for i in range(n)]
return enumList
def create_col_enum_list(inCount):
# create the per-column choice lists
colEnumList = []
for col in range(inCount):
enumList = create_enum_list(n=random.randint(1,4), quoteChars=quoteChars)
colEnumList.append(enumList)
return colEnumList
def write_syn_dataset(csvPathname, rowCount, inCount=1, outCount=1, SEED='12345678',
colSepChar=",", rowSepChar="\n", quoteChars="", colEnumList=None):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for row in range(rowCount):
# doesn't guarantee that 10000 rows have 10000 unique enums in a column
# essentially sampling with replacement
rowData = []
for iCol in range(inCount):
# FIX! we should add some random NA?
ri = random.choice(colEnumList[iCol])
rowData.append(ri)
# output columns. always 0-10e6 with 2 digits of fp precision
for oCol in range(outCount):
ri = "%.2f" % random.uniform(0, 10e6)
rowData.append(ri)
# use the new Hive separator
rowDataCsv = colSepChar.join(map(str,rowData)) + rowSepChar
### sys.stdout.write(rowDataCsv)
dsf.write(rowDataCsv)
dsf.close()
return colEnumList
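# Hedged usage sketch (path and counts illustrative):
#
#     cols = create_col_enum_list(3)
#     write_syn_dataset('/tmp/syn.csv', 100, inCount=3, outCount=2,
#                       SEED='12345678', colEnumList=cols)
#
# Each row then holds 3 enum cells plus 2 output cells drawn uniformly from
# [0, 10e6) with two digits of precision, e.g. "abE,e01,ba,5123456.78,9876.54".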
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=14)
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_exec2_enums_rand_cut(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
n = ROWS
tryList = [
(n, 10, 9, 'cE', 300),
]
# create key names to use for exec
eKeys = ['e%s' % i for i in range(10)]
# h2b.browseTheCloud()
trial = 0
for (rowCount, iColCount, oColCount, hex_key, timeoutSecs) in tryList:
colCount = iColCount + oColCount
hex_key = 'p'
colEnumList = create_col_enum_list(iColCount)
# create 100 possible cut expressions here, so we don't waste time below
rowExprList = []
print "Creating", CUT_EXPR_CNT, 'cut expressions'
for j in range(CUT_EXPR_CNT):
# init cutValue. None means no compare
cutValue = [None for i in range(iColCount)]
# build up a random cut expression
cols = random.sample(range(iColCount), random.randint(1,iColCount))
for c in cols:
# possible choices within the column
cel = colEnumList[c]
# for now the cutValues are numbers for the enum mappings
# FIX! hack. don't use encoding 0, maps to NA here? h2o doesn't like
# celChoice = str(random.choice(range(len(cel))))
celChoice = random.choice(range(len(cel)))
cutValue[c] = celChoice
cutExprList = []
pKey = Key('p')
for i,c in enumerate(cutValue):
if c is None:
continue
else:
# new ...ability to reference cols
# src[ src$age<17 && src$zip=95120 && ... , ]
# cutExprList.append('p$C'+str(i+1)+'=='+c)
# all column indexing in h2o-dev is with number
e = Fcn('==', c, pKey[:,i])
cutExprList.append(e)
cutExpr = None
for ce in cutExprList:
if cutExpr:
cutExpr = Fcn('&', cutExpr, ce)
else:
cutExpr = ce
print "cutExpr:", cutExpr
# should be two different keys in the sample
e = random.sample(eKeys,2)
fKey = e[0]
eKey = e[1]
# rowExpr = '%s[%s,];' % (hex_key, cutExpr)
hKey = Key(hex_key)
rowExpr = hKey[cutExpr, :]
print "rowExpr:", rowExpr
rowExprList.append(rowExpr)
# CREATE DATASET*******************************************
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_enums_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, iColCount, oColCount, SEEDPERFILE, colEnumList=colEnumList)
# PARSE*******************************************************
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
numRows, numCols, parse_key = h2o_cmd.infoFromParse(parseResult)
inspect = h2o_cmd.runInspect(key=parse_key)
missingList, valueList, numRows, numCols = h2o_cmd.infoFromInspect(inspect)
# print h2o.dump_json(inspect)
# (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
# h2o_cmd.columnInfoFromInspect(parse_key, exceptionOnMissingValues=False)
# error if any col has constant values
# if len(constantValuesDict) != 0:
# raise Exception("Probably got a col NA'ed and constant values as a result %s" % constantValuesDict)
# INIT all possible key names used***************************
# remember. 1 indexing!
# build up the columns
Assign('b', [1,2,3])
# could also append 1 col at a time, by assigning to the next col number?
Assign('a', Cbind(['b' for i in range(colCount)]))
for eKey in eKeys:
Assign(eKey, 'a')
## print h2o.dump_json(e)
xList = []
eList = []
fList = []
for repeat in range(200):
# EXEC*******************************************************
# don't use exec_expr to avoid issues with Inspect following etc.
randICol = random.randint(0,iColCount-1)
randOCol = random.randint(iColCount, iColCount+oColCount-1)
# should be two different keys in the sample
e = random.sample(eKeys,2)
fKey = e[0]
eKey = e[1]
if 1==1:
start = time.time()
Assign(fKey, random.choice(rowExprList)).do()
elapsed = time.time() - start
execTime = elapsed
print "exec 2 took", elapsed, "seconds."
inspect = h2o_cmd.runInspect(key=fKey)
missingList, valueList, numRows, numCols = h2o_cmd.infoFromInspect(inspect)
if numRows==0 or numCols!=colCount:
h2p.red_print("Warning: Cut resulted in", numRows, "rows and", numCols, "cols. Quantile will abort")
# FIX! put quantile back in?
quantileTime = 0
# remove all keys*******************************************************
# what about hex_key?
if 1==0:
start = time.time()
h2o.nodes[0].remove_all_keys()
elapsed = time.time() - start
print "remove all keys end on ", csvFilename, 'took', elapsed, 'seconds.'
trial += 1
xList.append(trial)
eList.append(execTime)
fList.append(quantileTime)
# just get a plot of the last one (biggest)
if DO_PLOT:
xLabel = 'trial'
eLabel = 'exec cut time'
fLabel = 'quantile time'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel)
if __name__ == '__main__':
h2o.unit_main()
|