gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/env python3
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
IPMI Torture
------------
Use several threads in `op-test` to poke IPMI concurrently in a number
of "safe" ways, and see when the BMC explodes.
'''
import unittest
import time
import threading
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
from common.OpTestConstants import OpTestConstants as BMC_CONST
class OobIpmiThread(threading.Thread):
    '''
    Thread that hammers the BMC with a single out-of-band ipmitool
    command, re-issuing it every 2 seconds until the requested number
    of minutes has elapsed.
    '''

    def __init__(self, threadID, name, cmd, execution_time):
        '''
        :param threadID: numeric identifier for the thread
        :param name: human-readable thread name (used in log output)
        :param cmd: ipmitool sub-command to torture with (e.g. "sdr list")
        :param execution_time: how long to keep running, in minutes
        '''
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.cmd = cmd
        self.execution_time = execution_time
        conf = OpTestConfiguration.conf
        self.cv_IPMI = conf.ipmi()

    def run(self):
        print("Starting " + self.name)
        self.oob_ipmi_thread(self.name, self.cmd, self.execution_time)
        print("Exiting " + self.name)

    def oob_ipmi_thread(self, threadName, cmd, t):
        '''
        Repeatedly run `cmd` over out-of-band IPMI for `t` minutes.

        Individual command failures are ignored on purpose: the point
        of the torture test is to keep poking the BMC, not to stop at
        the first hiccup.
        '''
        execution_time = time.time() + 60 * t
        print("Starting %s for oob-ipmi %s" % (threadName, cmd))
        while True:
            try:
                self.cv_IPMI.ipmitool.run(cmd, logcmd=False)
            except Exception:
                # Narrowed from a bare "except:", which would also have
                # swallowed KeyboardInterrupt/SystemExit.
                pass
            if time.time() > execution_time:
                break
            time.sleep(2)
class InbandIpmiThread(threading.Thread):
    '''
    Thread that hammers the BMC with a single in-band ipmitool command
    (run on the host over SSH), re-issuing it every 2 seconds until the
    requested number of minutes has elapsed.
    '''

    def __init__(self, threadID, name, ipmi_method, cmd, execution_time):
        '''
        :param threadID: numeric identifier for the thread
        :param name: human-readable thread name (used in log output)
        :param ipmi_method: in-band ipmitool invocation prefix
                            (e.g. BMC_CONST.IPMITOOL_OPEN or IPMITOOL_USB)
        :param cmd: ipmitool sub-command to torture with
        :param execution_time: how long to keep running, in minutes
        '''
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.ipmi_method = ipmi_method
        self.cmd = cmd
        self.execution_time = execution_time
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()
        self.cv_SYSTEM = conf.system()

    def run(self):
        print("Starting " + self.name)
        self.inband_ipmi_thread(self.name, self.cmd, self.execution_time)
        print("Exiting " + self.name)

    def inband_ipmi_thread(self, threadName, cmd, t):
        '''
        Repeatedly run `ipmi_method + cmd` on the host for `t` minutes.
        Failures of individual invocations are deliberately ignored.
        '''
        execution_time = time.time() + 60 * t
        self.c = self.cv_HOST.get_ssh_connection()
        print("Starting %s for inband-ipmi %s" % (threadName, cmd))
        while True:
            try:
                self.c.run_command(self.ipmi_method + cmd)
            except Exception:
                # Narrowed from a bare "except:" so Ctrl-C still works.
                pass
            if time.time() > execution_time:
                break
            time.sleep(2)
class SolConsoleThread(threading.Thread):
    '''
    Thread that repeatedly opens and closes the SOL console, optionally
    generating console traffic while the system is at runtime.
    '''

    def __init__(self, threadID, name, test, execution_time):
        '''
        :param threadID: numeric identifier for the thread
        :param name: human-readable thread name (used in log output)
        :param test: test-scenario string ("runtime", "standby",
                     "skiroot_runtime", ...) — runtime scenarios also
                     generate console I/O
        :param execution_time: how long to keep running, in minutes
        '''
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.test = test
        self.execution_time = execution_time
        conf = OpTestConfiguration.conf
        self.cv_SYSTEM = conf.system()

    def run(self):
        print("Starting " + self.name)
        self.sol_console_thread(self.name, self.execution_time)
        print("Exiting " + self.name)

    def sol_console_thread(self, threadName, t):
        '''
        Open/close the SOL console in a loop for `t` minutes.
        '''
        self.c = self.cv_SYSTEM.console
        # Enable kernel logging(printk) to console
        self.c.run_command("echo 10 > /proc/sys/kernel/printk")
        # Bug fix: honour the `t` argument instead of silently re-reading
        # self.execution_time (the caller passes the same value, but the
        # parameter was previously ignored).
        execution_time = time.time() + 60 * t
        i = 0
        while True:
            print("Iteration %s, SOL open/close" % i)
            try:
                self.c.get_console()
                # Execute any host command(for console IO) if system is in runtime
                if "runtime" in self.test:
                    try:
                        self.c.run_command("ipmitool power status")
                        # Enable console traffic by printing the processes/tasks to the console
                        self.c.run_command("echo t > /proc/sysrq-trigger")
                    except Exception:
                        # Best-effort traffic generation; keep looping.
                        pass
                self.c.close()
            except Exception:
                # Narrowed from bare "except:"; console flakiness is expected
                # under torture and must not kill the thread.
                pass
            time.sleep(3)
            i += 1
            if time.time() > execution_time:
                break
class IpmiInterfaceTorture(unittest.TestCase):
    '''
    Torture the IPMI interfaces concurrently: out-of-band always, plus
    in-band open and USB interfaces when the host OS is available.
    Subclasses provide setup_test(), which must set self.test and drive
    the system to the appropriate state.
    '''

    # Commands considered "safe" to hammer repeatedly.
    CMD_LIST = ["sdr list", "fru print",
                "sel list", "sensor list", "power status"]

    def setUp(self):
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()
        self.cv_IPMI = conf.ipmi()
        self.cv_SYSTEM = conf.system()
        self.torture_time = 2400  # minutes
        self.bmc_type = conf.args.bmc_type
        # Created here (not in runTest) so tearDown can always join
        # whatever threads were actually started, even if runTest bails
        # out early or fails part-way through.
        self.thread_list = []

    def runTest(self):
        self.setup_test()
        torture_time = self.torture_time
        # OOB IPMI Torture: two threads per command.
        for j in range(1, 3):
            for idx, cmd in enumerate(self.CMD_LIST):
                num = j * (idx + 1)
                thread = OobIpmiThread(num, "Thread-%s" % num, cmd,
                                       torture_time)
                thread.start()
                self.thread_list.append(thread)
        # In-band interfaces need the host OS up; skip for skiroot/standby.
        if "skiroot" in self.test:
            return
        if self.test == "standby":
            return
        # In-band IPMI Torture (Open Interface)
        for j in range(1, 3):
            for idx, cmd in enumerate(self.CMD_LIST):
                num = j * (idx + 1)
                thread = InbandIpmiThread(
                    num, "Thread-%s" % num, BMC_CONST.IPMITOOL_OPEN, cmd,
                    torture_time)
                thread.start()
                self.thread_list.append(thread)
        # FSP systems have no USB IPMI interface.
        if "FSP" in self.bmc_type:
            return
        # In-band IPMI Torture (USB Interface)
        for idx, cmd in enumerate(self.CMD_LIST):
            thread = InbandIpmiThread(
                idx, "Thread-%s" % idx, BMC_CONST.IPMITOOL_USB, cmd,
                torture_time)
            thread.start()
            self.thread_list.append(thread)

    def tearDown(self):
        # Wait for every started thread.  getattr() guards against setUp
        # itself having failed before thread_list existed, which would
        # previously have masked the real error with an AttributeError.
        for thread in getattr(self, 'thread_list', []):
            thread.join()
class ConsoleIpmiTorture(unittest.TestCase):
    '''
    Torture the SOL console (open/close loop) while OOB IPMI commands
    run concurrently in the background.  Subclasses provide
    setup_test(), which must set self.test and drive the system state.
    '''

    def setUp(self):
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()
        self.cv_IPMI = conf.ipmi()
        self.cv_SYSTEM = conf.system()
        self.torture_time = 2400  # minutes
        # Created here so tearDown can always iterate it safely, even if
        # setup_test()/runTest() fail before any thread is started.
        self.thread_list = []

    def ipmi_interface_torture(self):
        # OOB IPMI Torture: two threads per command.
        torture_time = self.torture_time
        cmd_list = ["sdr list", "fru print",
                    "sel list", "sensor list", "power status"]
        for j in range(1, 3):
            for idx, cmd in enumerate(cmd_list):
                num = j * (idx + 1)
                thread = OobIpmiThread(num, "Thread-%s" % num, cmd,
                                       torture_time)
                thread.start()
                self.thread_list.append(thread)
        if self.test == "standby":
            return
        if "skiroot" in self.test:
            return
        # Deliberately disabled: running in-band IPMI torture here makes
        # the console and ssh session output too cluttered to be useful.
        return
        # In-band IPMI Torture (dead code, kept for easy re-enabling).
        for j in range(1, 3):
            for idx, cmd in enumerate(cmd_list):
                num = j * (idx + 1)
                thread = InbandIpmiThread(
                    num, "Thread-%s" % num, BMC_CONST.IPMITOOL_OPEN, cmd,
                    torture_time)
                thread.start()
                self.thread_list.append(thread)

    def console_torture(self):
        # Single SOL open/close torture thread.
        thread = SolConsoleThread(
            1, "SOL-Thread", self.test, self.torture_time)
        thread.start()
        self.thread_list.append(thread)

    def runTest(self):
        self.setup_test()
        self.ipmi_interface_torture()
        self.console_torture()

    def tearDown(self):
        # Wait for every started thread; guard against setUp failure so a
        # missing thread_list doesn't mask the original error.
        for thread in getattr(self, 'thread_list', []):
            thread.join()
class SkirootConsoleTorture(ConsoleIpmiTorture):
    '''Console torture with the system at the Petitboot shell.'''

    def setup_test(self):
        # "runtime" appears in the test name, so the SOL thread will also
        # generate console traffic.
        self.test = "skiroot_runtime"
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        self.c = self.cv_SYSTEM.console
class SkirootIpmiTorture(IpmiInterfaceTorture):
    '''IPMI interface torture with the system at the Petitboot shell.'''

    def setup_test(self):
        self.test = "skiroot_runtime"
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        self.c = self.cv_SYSTEM.console
class RuntimeConsoleTorture(ConsoleIpmiTorture):
    '''Console torture with the host OS booted.'''

    def setup_test(self):
        self.test = "runtime"
        self.cv_SYSTEM.goto_state(OpSystemState.OS)
        self.c = self.cv_SYSTEM.console
class StandbyConsoleTorture(ConsoleIpmiTorture):
    '''Console torture with the system powered off (standby).'''

    def setup_test(self):
        self.test = "standby"
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.c = self.cv_SYSTEM.console
class RuntimeIpmiInterfaceTorture(IpmiInterfaceTorture):
    '''IPMI interface torture with the host OS booted.'''

    def setup_test(self):
        self.test = "runtime"
        self.cv_SYSTEM.goto_state(OpSystemState.OS)
        self.c = self.cv_SYSTEM.console
class StandbyIpmiInterfaceTorture(IpmiInterfaceTorture):
    '''IPMI interface torture with the system powered off (standby).'''

    def setup_test(self):
        self.test = "standby"
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.c = self.cv_SYSTEM.console
|
|
import copy
from functools import partial
import os.path
import sys
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import six
from fudge import Fake, patched_context
from nose.tools import ok_, eq_
from fabric.decorators import hosts, roles, task
from fabric.context_managers import settings
from fabric.main import (parse_arguments, _escape_split, find_fabfile,
load_fabfile as _load_fabfile, list_commands, _task_names,
COMMANDS_HEADER, NESTED_REMINDER)
import fabric.state
from fabric.tasks import Task, WrappedCallableTask
from fabric.task_utils import _crawl, crawl, merge
from utils import FabricTest, fabfile, path_prefix, aborts
# Stupid load_fabfile wrapper to hide newly added return value.
# WTB more free time to rewrite all this with objects :)
def load_fabfile(*args, **kwargs):
    """Wrap fabric.main.load_fabfile, discarding its newly added third return value."""
    result = _load_fabfile(*args, **kwargs)
    return result[:2]
#
# Basic CLI stuff
#
def test_argument_parsing():
    """
    parse_arguments() should split 'name:arg,kw=val,...' strings into
    (name, args, kwargs, hosts, roles, exclude_hosts) tuples.
    """
    for args, output in [
        # Basic
        ('abc', ('abc', [], {}, [], [], [])),
        # Arg
        ('ab:c', ('ab', ['c'], {}, [], [], [])),
        # Kwarg
        ('a:b=c', ('a', [], {'b': 'c'}, [], [], [])),
        # Arg and kwarg
        ('a:b=c,d', ('a', ['d'], {'b': 'c'}, [], [], [])),
        # Multiple kwargs
        ('a:b=c,d=e', ('a', [], {'b': 'c', 'd': 'e'}, [], [], [])),
        # Host
        ('abc:host=foo', ('abc', [], {}, ['foo'], [], [])),
        # Hosts with single host
        ('abc:hosts=foo', ('abc', [], {}, ['foo'], [], [])),
        # Hosts with multiple hosts
        # Note: in a real shell, one would need to quote or escape "foo;bar".
        # But in pure-Python that would get interpreted literally, so we don't.
        ('abc:hosts=foo;bar', ('abc', [], {}, ['foo', 'bar'], [], [])),
        # Exclude hosts
        ('abc:hosts=foo;bar,exclude_hosts=foo', ('abc', [], {}, ['foo', 'bar'], [], ['foo'])),
        ('abc:hosts=foo;bar,exclude_hosts=foo;bar', ('abc', [], {}, ['foo', 'bar'], [], ['foo', 'bar'])),
        # Empty string args
        ("task:x=y,z=", ('task', [], {'x': 'y', 'z': ''}, [], [], [])),
        ("task:foo,,x=y", ('task', ['foo', ''], {'x': 'y'}, [], [], [])),
    ]:
        # Yielded so nose reports one test per input string.
        yield eq_, parse_arguments([args]), [output]
def test_escaped_task_arg_split():
    """
    Allow backslashes to escape the task argument separator character
    """
    expected = ['foo', 'bar,biz,baz', 'what comes after baz?']
    eq_(_escape_split(',', r"foo,bar\,biz\,baz,what comes after baz?"), expected)
def test_escaped_task_kwarg_split():
    """
    Allow backslashes to escape the = in x=y task kwargs
    """
    argstr = r"cmd:arg,escaped\,arg,nota\=kwarg,regular=kwarg,escaped=regular\=kwarg"
    expected = (
        'cmd',
        ['arg', 'escaped,arg', 'nota=kwarg'],
        {'regular': 'kwarg', 'escaped': 'regular=kwarg'},
        [],
        [],
        [],
    )
    eq_(parse_arguments([argstr])[0], expected)
#
# Host/role decorators
#
# Allow calling Task.get_hosts as function instead (meh.)
def get_hosts_and_effective_roles(command, *args):
    """Call Task.get_hosts_and_effective_roles on *command* as a plain function."""
    wrapped = WrappedCallableTask(command)
    return wrapped.get_hosts_and_effective_roles(*args)
def eq_hosts(command, expected_hosts, cli_hosts=None, excluded_hosts=None, env=None, func=set):
    """Assert that the hosts resolved for *command* equal *expected_hosts* (as sets by default)."""
    actual = get_hosts_and_effective_roles(
        command, cli_hosts or [], [], excluded_hosts or [], env)[0]
    eq_(func(actual), func(expected_hosts))
def eq_effective_roles(command, expected_effective_roles, cli_roles=None, env=None, func=set):
    """Assert that the effective roles resolved for *command* match expectations."""
    actual = get_hosts_and_effective_roles(
        command, [], cli_roles or [], [], env)[1]
    eq_(func(actual), func(expected_effective_roles))


# Variant of eq_hosts that compares without set-deduplication.
true_eq_hosts = partial(eq_hosts, func=lambda x: x)
def test_hosts_decorator_by_itself():
    """
    Use of @hosts only
    """
    expected = ['a', 'b']

    @hosts(*expected)
    def command():
        pass

    eq_hosts(command, expected)
# Shared role -> host-list fixture used throughout the decorator tests.
fake_roles = {
    'r1': ['a', 'b'],
    'r2': ['b', 'c']
}
def test_roles_decorator_by_itself():
    """
    Use of @roles only
    """
    @roles('r1')
    def command():
        pass
    eq_hosts(command, ['a', 'b'], env={'roledefs': fake_roles})
    eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles})


def test_roles_decorator_overrides_env_roles():
    """
    If @roles is used it replaces any env.roles value
    """
    @roles('r1')
    def command():
        pass
    eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles,
                                             'roles': ['r2']})


def test_cli_roles_override_decorator_roles():
    """
    If CLI roles are provided they replace roles defined in @roles.
    """
    @roles('r1')
    def command():
        pass
    eq_effective_roles(command, ['r2'], cli_roles=['r2'], env={'roledefs': fake_roles})


def test_hosts_and_roles_together():
    """
    Use of @roles and @hosts together results in union of both
    """
    @roles('r1', 'r2')
    @hosts('d')
    def command():
        pass
    eq_hosts(command, ['a', 'b', 'c', 'd'], env={'roledefs': fake_roles})
    eq_effective_roles(command, ['r1', 'r2'], env={'roledefs': fake_roles})


def test_host_role_merge_deduping():
    """
    Use of @roles and @hosts dedupes when merging
    """
    @roles('r1', 'r2')
    @hosts('a')
    def command():
        pass
    # Not ['a', 'a', 'b', 'c'] or etc
    true_eq_hosts(command, ['a', 'b', 'c'], env={'roledefs': fake_roles})


def test_host_role_merge_deduping_off():
    """
    Allow turning deduping off
    """
    @roles('r1', 'r2')
    @hosts('a')
    def command():
        pass
    with settings(dedupe_hosts=False):
        true_eq_hosts(
            command,
            # 'a' 1x host 1x role
            # 'b' 1x r1 1x r2
            ['a', 'a', 'b', 'b', 'c'],
            env={'roledefs': fake_roles}
        )


# Role fixture whose host containers are tuples rather than lists.
tuple_roles = {
    'r1': ('a', 'b'),
    'r2': ('b', 'c'),
}


def test_roles_as_tuples():
    """
    Test that a list of roles as a tuple succeeds
    """
    @roles('r1')
    def command():
        pass
    eq_hosts(command, ['a', 'b'], env={'roledefs': tuple_roles})
    # NOTE(review): fake_roles here looks like a copy/paste slip --
    # tuple_roles was probably intended; the outcome is identical.
    eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles})


def test_hosts_as_tuples():
    """
    Test that a list of hosts as a tuple succeeds
    """
    def command():
        pass
    eq_hosts(command, ['foo', 'bar'], env={'hosts': ('foo', 'bar')})


def test_hosts_decorator_overrides_env_hosts():
    """
    If @hosts is used it replaces any env.hosts value
    """
    @hosts('bar')
    def command():
        pass
    eq_hosts(command, ['bar'], env={'hosts': ['foo']})


def test_hosts_decorator_overrides_env_hosts_with_task_decorator_first():
    """
    If @hosts is used it replaces any env.hosts value even with @task
    """
    @task
    @hosts('bar')
    def command():
        pass
    eq_hosts(command, ['bar'], env={'hosts': ['foo']})


def test_hosts_decorator_overrides_env_hosts_with_task_decorator_last():
    # Same as the previous test, but with @task innermost instead of
    # outermost, to cover both decorator orderings.
    @hosts('bar')
    @task
    def command():
        pass
    eq_hosts(command, ['bar'], env={'hosts': ['foo']})
def test_hosts_stripped_env_hosts():
    """
    Make sure hosts defined in env.hosts are cleaned of extra spaces
    """
    def command():
        pass
    myenv = {'hosts': [' foo ', 'bar '], 'roles': [], 'exclude_hosts': []}
    eq_hosts(command, ['foo', 'bar'], env=myenv)


# Role fixture whose host names carry stray whitespace.
spaced_roles = {
    'r1': [' a ', ' b '],
    'r2': ['b', 'c'],
}


def test_roles_stripped_env_hosts():
    """
    Make sure hosts defined in env.roles are cleaned of extra spaces
    """
    @roles('r1')
    def command():
        pass
    eq_hosts(command, ['a', 'b'], env={'roledefs': spaced_roles})


# Role fixture where one roledef is a dict with a 'hosts' key.
dict_roles = {
    'r1': {'hosts': ['a', 'b']},
    'r2': ['b', 'c'],
}


def test_hosts_in_role_dict():
    """
    Make sure roledefs may be dicts exposing a 'hosts' key
    """
    @roles('r1')
    def command():
        pass
    eq_hosts(command, ['a', 'b'], env={'roledefs': dict_roles})


def test_hosts_decorator_expands_single_iterable():
    """
    @hosts(iterable) should behave like @hosts(*iterable)
    """
    host_list = ['foo', 'bar']
    @hosts(host_list)
    def command():
        pass
    eq_(command.hosts, host_list)


def test_roles_decorator_expands_single_iterable():
    """
    @roles(iterable) should behave like @roles(*iterable)
    """
    role_list = ['foo', 'bar']
    @roles(role_list)
    def command():
        pass
    eq_(command.roles, role_list)
#
# Host exclusion
#
def dummy():
    """No-op task used by the exclusion tests below."""
    pass


def test_get_hosts_excludes_cli_exclude_hosts_from_cli_hosts():
    eq_hosts(dummy, ['bar'], cli_hosts=['foo', 'bar'], excluded_hosts=['foo'])


def test_get_hosts_excludes_cli_exclude_hosts_from_decorator_hosts():
    @hosts('foo', 'bar')
    def command():
        pass
    eq_hosts(command, ['bar'], excluded_hosts=['foo'])


def test_get_hosts_excludes_global_exclude_hosts_from_global_hosts():
    fake_env = {'hosts': ['foo', 'bar'], 'exclude_hosts': ['foo']}
    eq_hosts(dummy, ['bar'], env=fake_env)
#
# Basic role behavior
#
@aborts
def test_aborts_on_nonexistent_roles():
    """
    Aborts if any given roles aren't found
    """
    merge([], ['badrole'], [], {})


def test_accepts_non_list_hosts():
    """
    Coerces given host string to a one-item list
    """
    assert merge('badhosts', [], [], {}) == ['badhosts']


# Roledefs may also be callables producing the host list lazily.
lazy_role = {'r1': lambda: ['a', 'b']}


def test_lazy_roles():
    """
    Roles may be callables returning lists, as well as regular lists
    """
    @roles('r1')
    def command():
        pass
    eq_hosts(command, ['a', 'b'], env={'roledefs': lazy_role})
#
# Fabfile finding
#
class TestFindFabfile(FabricTest):
    """Test Fabric's fabfile discovery mechanism."""

    def test_find_fabfile_can_discovery_package(self):
        """Fabric should be capable of loading a normal package."""
        # mkfile() creates the file in a temp dir; the dirname is the
        # candidate package directory handed to find_fabfile().
        path = self.mkfile("__init__.py", "")
        name = os.path.dirname(path)
        assert find_fabfile([name]) is not None

    def test_find_fabfile_can_discovery_package_with_pyc_only(self):
        """
        Fabric should be capable of loading a package with __init__.pyc only.
        """
        path = self.mkfile("__init__.pyc", "")
        name = os.path.dirname(path)
        assert find_fabfile([name]) is not None

    def test_find_fabfile_should_refuse_fake_package(self):
        """Fabric should refuse to load a non-package directory."""
        # A directory containing only foo.py is not a package (no __init__).
        path = self.mkfile("foo.py", "")
        name = os.path.dirname(path)
        assert find_fabfile([name]) is None
#
# Fabfile loading
#
def run_load_fabfile(path, sys_path):
    """
    Load a fabfile with sys.path temporarily replaced by *sys_path* and
    assert that loading leaves the (fake) path unchanged.
    """
    # Module-esque object
    fake_module = Fake().has_attr(__dict__={})
    # Fake importlib.import_module
    importer = Fake(callable=True).returns(fake_module)
    # Snapshot sys.path for restore
    orig_path = copy.copy(sys.path)
    # Update with fake path
    sys.path = sys_path
    try:
        # Test for side effects
        load_fabfile(path, importer=importer)
        eq_(sys.path, sys_path)
    finally:
        # Bug fix: always restore the real sys.path, even when the load or
        # the assertion raises -- otherwise every subsequent test in the
        # process runs with a bogus sys.path.
        sys.path = orig_path
def test_load_fabfile_should_not_remove_real_path_elements():
    # Each (fabfile path, fake sys.path) pair is run through
    # run_load_fabfile to prove loading never strips genuine entries.
    for fabfile_path, sys_dot_path in (
        # Directory not in path
        ('subdir/fabfile.py', ['not_subdir']),
        ('fabfile.py', ['nope']),
        # Directory in path, but not at front
        ('subdir/fabfile.py', ['not_subdir', 'subdir']),
        ('fabfile.py', ['not_subdir', '']),
        ('fabfile.py', ['not_subdir', '', 'also_not_subdir']),
        # Directory in path, and at front already
        ('subdir/fabfile.py', ['subdir']),
        ('subdir/fabfile.py', ['subdir', 'not_subdir']),
        ('fabfile.py', ['', 'some_dir', 'some_other_dir']),
    ):
        yield run_load_fabfile, fabfile_path, sys_dot_path
#
# Namespacing and new-style tasks
#
class TestTaskAliases(FabricTest):
    """Loading of task alias / aliases declarations from fabfiles."""

    def test_flat_alias(self):
        f = fabfile("flat_alias.py")
        with path_prefix(f):
            docs, funcs = load_fabfile(f)
            eq_(len(funcs), 2)
            ok_("foo" in funcs)
            ok_("foo_aliased" in funcs)

    def test_nested_alias(self):
        f = fabfile("nested_alias.py")
        with path_prefix(f):
            docs, funcs = load_fabfile(f)
            ok_("nested" in funcs)
            eq_(len(funcs["nested"]), 2)
            ok_("foo" in funcs["nested"])
            ok_("foo_aliased" in funcs["nested"])

    def test_flat_aliases(self):
        f = fabfile("flat_aliases.py")
        with path_prefix(f):
            docs, funcs = load_fabfile(f)
            eq_(len(funcs), 3)
            ok_("foo" in funcs)
            ok_("foo_aliased" in funcs)
            ok_("foo_aliased_two" in funcs)

    def test_nested_aliases(self):
        f = fabfile("nested_aliases.py")
        with path_prefix(f):
            docs, funcs = load_fabfile(f)
            ok_("nested" in funcs)
            eq_(len(funcs["nested"]), 3)
            ok_("foo" in funcs["nested"])
            ok_("foo_aliased" in funcs["nested"])
            ok_("foo_aliased_two" in funcs["nested"])
class TestNamespaces(FabricTest):
    """New-style task discovery and namespacing behaviour."""

    def setup(self):
        # Parent class preserves current env
        super(TestNamespaces, self).setup()
        # Reset new-style-tests flag so running tests via Fab itself doesn't
        # muck with it.
        import fabric.state
        if 'new_style_tasks' in fabric.state.env:
            del fabric.state.env['new_style_tasks']

    def test_implicit_discovery(self):
        """
        Default to automatically collecting all tasks in a fabfile module
        """
        implicit = fabfile("implicit_fabfile.py")
        with path_prefix(implicit):
            docs, funcs = load_fabfile(implicit)
            eq_(len(funcs), 2)
            ok_("foo" in funcs)
            ok_("bar" in funcs)

    def test_exception_exclusion(self):
        """
        Exception subclasses should not be considered as tasks
        """
        exceptions = fabfile("exceptions_fabfile.py")
        with path_prefix(exceptions):
            docs, funcs = load_fabfile(exceptions)
            ok_("some_task" in funcs)
            ok_("NotATask" not in funcs)

    def test_explicit_discovery(self):
        """
        If __all__ is present, only collect the tasks it specifies
        """
        explicit = fabfile("explicit_fabfile.py")
        with path_prefix(explicit):
            docs, funcs = load_fabfile(explicit)
            eq_(len(funcs), 1)
            ok_("foo" in funcs)
            ok_("bar" not in funcs)

    def test_should_load_decorated_tasks_only_if_one_is_found(self):
        """
        If any new-style tasks are found, *only* new-style tasks should load
        """
        module = fabfile('decorated_fabfile.py')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            eq_(len(funcs), 1)
            ok_('foo' in funcs)

    def test_class_based_tasks_are_found_with_proper_name(self):
        """
        Wrapped new-style tasks should preserve their function names
        """
        module = fabfile('decorated_fabfile_with_classbased_task.py')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            eq_(len(funcs), 1)
            ok_('foo' in funcs)

    def test_class_based_tasks_are_found_with_variable_name(self):
        """
        A new-style tasks with undefined name attribute should use the instance
        variable name.
        """
        module = fabfile('classbased_task_fabfile.py')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            eq_(len(funcs), 1)
            ok_('foo' in funcs)
            eq_(funcs['foo'].name, 'foo')

    def test_recursion_steps_into_nontask_modules(self):
        """
        Recursive loading will continue through modules with no tasks
        """
        module = fabfile('deep')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            eq_(len(funcs), 1)
            ok_('submodule.subsubmodule.deeptask' in _task_names(funcs))

    def test_newstyle_task_presence_skips_classic_task_modules(self):
        """
        Classic-task-only modules shouldn't add tasks if any new-style tasks exist
        """
        module = fabfile('deep')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            eq_(len(funcs), 1)
            ok_('submodule.classic_task' not in _task_names(funcs))

    def test_task_decorator_plays_well_with_others(self):
        """
        @task, when inside @hosts/@roles, should not hide the decorated task.
        """
        module = fabfile('decorator_order')
        with path_prefix(module):
            docs, funcs = load_fabfile(module)
            # When broken, crawl() finds None for 'foo' instead.
            eq_(crawl('foo', funcs), funcs['foo'])
#
# --list output
#
def eq_output(docstring, format_, expected):
    """Assert that list_commands output, newline-joined, equals *expected*."""
    actual = "\n".join(list_commands(docstring, format_))
    return eq_(actual, expected)
def list_output(module, format_, expected):
    # Load *module*'s tasks, patch them into fabric.state.commands, then
    # compare --list output in the given format against *expected*.
    module = fabfile(module)
    with path_prefix(module):
        docstring, tasks = load_fabfile(module)
        with patched_context(fabric.state, 'commands', tasks):
            eq_output(docstring, format_, expected)
def test_list_output():
    # Shared prefix fragments for the non-short formats.  NOTE(review):
    # the indentation inside the expected-output literals below was
    # mangled by whitespace stripping; reconstructed to match Fabric's
    # 4-space --list indentation -- verify against list_commands output.
    lead = ":\n\n    "
    normal_head = COMMANDS_HEADER + lead
    nested_head = COMMANDS_HEADER + NESTED_REMINDER + lead
    for desc, module, format_, expected in (
        ("shorthand (& with namespacing)", 'deep', 'short', "submodule.subsubmodule.deeptask"),
        ("normal (& with namespacing)", 'deep', 'normal', normal_head + "submodule.subsubmodule.deeptask"),
        ("normal (with docstring)", 'docstring', 'normal', normal_head + "foo Foos!"),
        ("nested (leaf only)", 'deep', 'nested', nested_head + """submodule:
    subsubmodule:
        deeptask"""),
        ("nested (full)", 'tree', 'nested', nested_head + """build_docs
    deploy
    db:
        migrate
    system:
        install_package
        debian:
            update_apt"""),
    ):
        # Per-case description so nose names each yielded test usefully.
        list_output.description = "--list output: %s" % desc
        yield list_output, module, format_, expected
        del list_output.description
def name_to_task(name):
    """Return a bare Task object whose .name attribute is *name*."""
    task_obj = Task()
    task_obj.name = name
    return task_obj
def strings_to_tasks(d):
    """
    Recursively convert a mapping of {key: name-or-submapping} into
    {key: Task-or-submapping}, mirroring the namespace structure.
    """
    ret = {}
    # d.items() works identically on Python 2 and 3 for iteration, so the
    # six.iteritems() shim is unnecessary here.
    for key, value in d.items():
        if isinstance(value, Mapping):
            ret[key] = strings_to_tasks(value)
        else:
            ret[key] = name_to_task(value)
    return ret
def test_task_names():
    """
    _task_names() should flatten nested task mappings into dotted names,
    listing top-level tasks before nested ones, peers sorted equally.
    """
    for desc, input_, output in (
        ('top level (single)', {'a': 5}, ['a']),
        ('top level (multiple, sorting)', {'a': 5, 'b': 6}, ['a', 'b']),
        ('just nested', {'a': {'b': 5}}, ['a.b']),
        ('mixed', {'a': 5, 'b': {'c': 6}}, ['a', 'b.c']),
        ('top level comes before nested', {'z': 5, 'b': {'c': 6}}, ['z', 'b.c']),
        ('peers sorted equally', {'z': 5, 'b': {'c': 6}, 'd': {'e': 7}}, ['z', 'b.c', 'd.e']),
        (
            'complex tree',
            {
                'z': 5,
                'b': {
                    'c': 6,
                    'd': {
                        'e': {
                            'f': '7'
                        }
                    },
                    'g': 8
                },
                'h': 9,
                'w': {
                    'y': 10
                }
            },
            ['h', 'z', 'b.c', 'b.g', 'b.d.e.f', 'w.y']
        ),
    ):
        eq_.description = "task name flattening: %s" % desc
        yield eq_, _task_names(strings_to_tasks(input_)), output
        del eq_.description
def test_crawl():
    """
    _crawl() should resolve dotted names through nested mappings.
    """
    for desc, name, mapping, output in (
        ("base case", 'a', {'a': 5}, 5),
        ("one level", 'a.b', {'a': {'b': 5}}, 5),
        ("deep", 'a.b.c.d.e', {'a': {'b': {'c': {'d': {'e': 5}}}}}, 5),
        ("full tree", 'a.b.c', {'a': {'b': {'c': 5}, 'd': 6}, 'z': 7}, 5)
    ):
        eq_.description = "crawling dotted names: %s" % desc
        yield eq_, _crawl(name, mapping), output
        del eq_.description
def test_mapping_task_classes():
    """
    Task classes implementing the mapping interface shouldn't break --list
    """
    # NOTE(review): leading indentation inside the expected literal was
    # mangled by whitespace stripping; reconstructed as 4 spaces.
    list_output('mapping', 'normal', COMMANDS_HEADER + """:\n
    mapping_task""")
def test_default_task_listings():
    """
    @task(default=True) should cause task to also load under module's name
    """
    # NOTE(review): leading indentation inside the expected literals was
    # mangled by whitespace stripping; reconstructed per --list's 4-space
    # indent (short format is unindented).
    for format_, expected in (
        ('short', """mymodule
mymodule.long_task_name"""),
        ('normal', COMMANDS_HEADER + """:\n
    mymodule
    mymodule.long_task_name"""),
        ('nested', COMMANDS_HEADER + NESTED_REMINDER + """:\n
    mymodule:
        long_task_name""")
    ):
        list_output.description = "Default task --list output: %s" % format_
        yield list_output, 'default_tasks', format_, expected
        del list_output.description
def test_default_task_loading():
    """
    crawl() should return default tasks where found, instead of module objs
    """
    docs, tasks = load_fabfile(fabfile('default_tasks'))
    ok_(isinstance(crawl('mymodule', tasks), Task))
def test_aliases_appear_in_fab_list():
    """
    --list should include aliases
    """
    # Short format output is unindented, one task name per line.
    list_output('nested_alias', 'short', """nested.foo
nested.foo_aliased""")
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from unittest import TestCase
import mock
from ming.odm import ThreadLocalORMSession
import webtest
from allura.tests import TestController
from allura.tests.decorators import with_tracker
from allura import model as M
from forgeimporters.forge import tracker
class TestTrackerImporter(TestCase):
    @mock.patch.object(tracker.h, 'make_app_admin_only')
    @mock.patch.object(tracker, 'g')
    @mock.patch.object(tracker, 'c')
    @mock.patch.object(tracker, 'ThreadLocalORMSession')
    @mock.patch.object(tracker, 'session')
    @mock.patch.object(tracker, 'M')
    @mock.patch.object(tracker, 'TM')
    def test_import_tool(self, TM, M, session, tlos, c, g, mao):
        # Happy-path import of two tickets (the second with an anonymous
        # reporter/owner), verifying install_app arguments, Ticket
        # construction, comment/bin processing and session flush/expunge
        # bookkeeping.  NB: mock.patch.object decorators apply bottom-up,
        # so the parameter order is the reverse of the decorator stack.
        importer = tracker.ForgeTrackerImporter()
        importer._load_json = mock.Mock(return_value={
            'tracker_config': {
                '_id': 'orig_id',
                'options': {
                    'foo': 'bar',
                },
            },
            'open_status_names': 'open statuses',
            'closed_status_names': 'closed statuses',
            'custom_fields': 'fields',
            'saved_bins': 'bins',
            'tickets': [
                {
                    'reported_by': 'rb1',
                    'assigned_to': 'at1',
                    'ticket_num': 1,
                    'description': 'd1',
                    'created_date': '2013-09-01',
                    'mod_date': '2013-09-02',
                    'summary': 's1',
                    'custom_fields': 'cf1',
                    'status': 'st1',
                    'labels': 'l1',
                    'votes_down': 1,
                    'votes_up': 2,
                    'private': False,
                    'discussion_thread': {'posts': 'comments1'},
                },
                {
                    'reported_by': 'rb2',
                    'assigned_to': 'at2',
                    'ticket_num': 100,
                    'description': 'd2',
                    'created_date': '2013-09-03',
                    'mod_date': '2013-09-04',
                    'summary': 's2',
                    'custom_fields': 'cf2',
                    'status': 'st2',
                    'labels': 'l2',
                    'votes_down': 3,
                    'votes_up': 5,
                    'private': True,
                    'discussion_thread': {'posts': 'comments2'},
                },
            ],
        })
        anonymous = mock.Mock(_id=None, is_anonymous=lambda: True)
        reporter = mock.Mock(is_anonymous=lambda: False)
        author = mock.Mock(is_anonymous=lambda: False)
        # get_user side effects pair up per ticket: ticket 1 resolves real
        # users, ticket 2 resolves the anonymous user twice.
        importer.get_user = mock.Mock(side_effect=[
            reporter, author,
            anonymous, anonymous,
        ])
        importer.annotate = mock.Mock(
            side_effect=['ad1', 'aad1', 'ad2', 'aad2'])
        importer.process_comments = mock.Mock()
        importer.process_bins = mock.Mock()
        project, user = mock.Mock(), mock.Mock()
        app = project.install_app.return_value
        app.config.options.mount_point = 'mount_point'
        app.config.options.import_id = {
            'source': 'Allura',
            'app_config_id': 'orig_id',
        }
        app.config.options.get = lambda *a: getattr(app.config.options, *a)
        app.url = 'foo'
        tickets = TM.Ticket.side_effect = [mock.Mock(), mock.Mock()]
        importer.import_tool(project, user,
                             mount_point='mount_point', mount_label='mount_label')
        project.install_app.assert_called_once_with(
            'tickets', 'mount_point', 'mount_label',
            open_status_names='open statuses',
            closed_status_names='closed statuses',
            import_id={
                'source': 'Allura',
                'app_config_id': 'orig_id',
            },
            foo='bar',
        )
        self.assertEqual(importer.annotate.call_args_list, [
            mock.call('d1', author, 'at1', label=' owned'),
            mock.call('ad1', reporter, 'rb1', label=' created'),
            mock.call('d2', anonymous, 'at2', label=' owned'),
            mock.call('ad2', anonymous, 'rb2', label=' created'),
        ])
        self.assertEqual(TM.Ticket.call_args_list, [
            mock.call(
                app_config_id=app.config._id,
                import_id={
                    'source': 'Allura',
                    'app_config_id': 'orig_id',
                    'source_id': 1,
                },
                description='aad1',
                created_date=datetime(2013, 9, 1),
                mod_date=datetime(2013, 9, 2),
                ticket_num=1,
                summary='s1',
                custom_fields='cf1',
                status='st1',
                labels='l1',
                votes_down=1,
                votes_up=2,
                votes=1,
                assigned_to_id=author._id,
            ),
            mock.call(
                app_config_id=app.config._id,
                import_id={
                    'source': 'Allura',
                    'app_config_id': 'orig_id',
                    'source_id': 100,
                },
                description='aad2',
                created_date=datetime(2013, 9, 3),
                mod_date=datetime(2013, 9, 4),
                ticket_num=100,
                summary='s2',
                custom_fields='cf2',
                status='st2',
                labels='l2',
                votes_down=3,
                votes_up=5,
                votes=2,
                assigned_to_id=None,
            ),
        ])
        self.assertEqual(tickets[0].private, False)
        self.assertEqual(tickets[1].private, True)
        self.assertEqual(importer.process_comments.call_args_list, [
            mock.call(tickets[0], 'comments1'),
            mock.call(tickets[1], 'comments2'),
        ])
        self.assertEqual(tlos.flush_all.call_args_list, [
            mock.call(),
            mock.call(),
        ])
        self.assertEqual(session.return_value.flush.call_args_list, [
            mock.call(tickets[0]),
            mock.call(tickets[1]),
        ])
        self.assertEqual(session.return_value.expunge.call_args_list, [
            mock.call(tickets[0]),
            mock.call(tickets[1]),
        ])
        self.assertEqual(app.globals.custom_fields, 'fields')
        importer.process_bins.assert_called_once_with(app, 'bins')
        self.assertEqual(app.globals.last_ticket_num, 100)
        M.AuditLog.log.assert_called_once_with(
            'import tool mount_point from exported Allura JSON',
            project=project, user=user, url='foo')
        g.post_event.assert_called_once_with('project_updated')
        app.globals.invalidate_bin_counts.assert_called_once_with()
    @mock.patch.object(tracker, 'ThreadLocalORMSession')
    @mock.patch.object(tracker, 'M')
    @mock.patch.object(tracker, 'h')
    def test_import_tool_failure(self, h, M, ThreadLocalORMSession):
        # If anything blows up mid-import, the half-installed tool must be
        # made admin-only and the original exception re-raised.
        M.session.artifact_orm_session._get.side_effect = ValueError
        project = mock.Mock()
        user = mock.Mock()
        tracker_json = {
            'tracker_config': {'_id': 'orig_id', 'options': {}},
            'open_status_names': 'os',
            'closed_status_names': 'cs',
        }
        importer = tracker.ForgeTrackerImporter()
        importer._load_json = mock.Mock(return_value=tracker_json)
        self.assertRaises(
            ValueError, importer.import_tool, project, user, project_name='project_name',
            mount_point='mount_point', mount_label='mount_label')
        h.make_app_admin_only.assert_called_once_with(
            project.install_app.return_value)
    @mock.patch.object(tracker, 'M')
    def test_get_user(self, M):
        # get_user falls back to the anonymous user when no username is
        # given or the username fails to resolve.
        importer = tracker.ForgeTrackerImporter()
        M.User.anonymous.return_value = 'anon'
        M.User.by_username.return_value = 'bar'
        self.assertEqual(importer.get_user('foo'), 'bar')
        self.assertEqual(M.User.anonymous.call_count, 0)
        self.assertEqual(importer.get_user(None), 'anon')
        self.assertEqual(M.User.anonymous.call_count, 1)
        M.User.by_username.return_value = None
        self.assertEqual(importer.get_user('foo'), 'anon')
        self.assertEqual(M.User.anonymous.call_count, 2)
    def test_annotate(self):
        # annotate() only prepends the "Originally by" banner when the mapped
        # user is anonymous AND the original username is a meaningful one
        # (not 'nobody', not None).
        importer = tracker.ForgeTrackerImporter()
        user = mock.Mock(_id=1)
        user.is_anonymous.return_value = False
        self.assertEqual(importer.annotate('foo', user, 'bar'), 'foo')
        user.is_anonymous.return_value = True
        self.assertEqual(importer.annotate('foo', user, 'bar'),
                         '*Originally by:* bar\n\nfoo')
        self.assertEqual(importer.annotate('foo', user, 'nobody'), 'foo')
        self.assertEqual(importer.annotate('foo', user, None), 'foo')
    @mock.patch.object(tracker, 'File')
    @mock.patch.object(tracker, 'c')
    def test_process_comments(self, c, File):
        # each imported comment must become one discussion post carrying the
        # annotated text and original timestamp, with its attachments added
        # in a single add_multiple_attachments call
        importer = tracker.ForgeTrackerImporter()
        author = mock.Mock()
        importer.get_user = mock.Mock(return_value=author)
        importer.annotate = mock.Mock(side_effect=['at1', 'at2'])
        ticket = mock.Mock()
        add_post = ticket.discussion_thread.add_post
        ama = add_post.return_value.add_multiple_attachments
        # one File per attachment URL, consumed in order
        File.side_effect = ['f1', 'f2', 'f3', 'f4']
        comments = [
            {
                'author': 'a1',
                'text': 't1',
                'timestamp': '2013-09-01',
                'attachments': [{'url': 'u1'}, {'url': 'u2'}],
            },
            {
                'author': 'a2',
                'text': 't2',
                'timestamp': '2013-09-02',
                'attachments': [{'url': 'u3'}, {'url': 'u4'}],
            },
        ]
        importer.process_comments(ticket, comments)
        # users resolved and texts annotated per comment, in order
        self.assertEqual(importer.get_user.call_args_list,
                         [mock.call('a1'), mock.call('a2')])
        self.assertEqual(importer.annotate.call_args_list, [
            mock.call('t1', author, 'a1'),
            mock.call('t2', author, 'a2'),
        ])
        # posts bypass security checks and keep the original timestamps
        self.assertEqual(add_post.call_args_list, [
            mock.call(text='at1', ignore_security=True,
                      timestamp=datetime(2013, 9, 1)),
            mock.call(text='at2', ignore_security=True,
                      timestamp=datetime(2013, 9, 2)),
        ])
        self.assertEqual(File.call_args_list, [
            mock.call('u1'),
            mock.call('u2'),
            mock.call('u3'),
            mock.call('u4'),
        ])
        # attachments grouped per post
        self.assertEqual(ama.call_args_list, [
            mock.call(['f1', 'f2']),
            mock.call(['f3', 'f4']),
        ])
@mock.patch.object(tracker, 'TM')
def test_process_bins(self, TM):
app = mock.Mock()
app.config._id = 1
importer = tracker.ForgeTrackerImporter()
importer.process_bins(app, [{'_id': 1, 'b': 1}, {'b': 2}])
TM.Bin.query.remove.assert_called_once_with({'app_config_id': 1})
self.assertEqual(TM.Bin.call_args_list, [
mock.call(app_config_id=1, b=1),
mock.call(app_config_id=1, b=2),
])
class TestForgeTrackerImportController(TestController, TestCase):
    """Functional (HTTP-level) tests for the tracker import controller."""
    def setUp(self):
        """Mount Allura importer on the Tracker admin controller"""
        super(TestForgeTrackerImportController, self).setUp()
        from forgetracker.tracker_main import TrackerAdminController
        TrackerAdminController._importer = \
            tracker.ForgeTrackerImportController(tracker.ForgeTrackerImporter())
    @with_tracker
    def test_index(self):
        # the import form must expose the upload field and mount inputs
        r = self.app.get('/p/test/admin/bugs/_importer/')
        self.assertIsNotNone(r.html.find(attrs=dict(name="tickets_json")))
        self.assertIsNotNone(r.html.find(attrs=dict(name="mount_label")))
        self.assertIsNotNone(r.html.find(attrs=dict(name="mount_point")))
    @with_tracker
    @mock.patch('forgeimporters.forge.tracker.save_importer_upload')
    @mock.patch('forgeimporters.base.import_tool')
    def test_create(self, import_tool, sui):
        # mocks are injected bottom-up: import_tool first, then
        # save_importer_upload (sui)
        project = M.Project.query.get(shortname='test')
        params = {
            # NOTE(review): py2-era str payload; webtest.Upload wants bytes
            # under py3 — confirm when porting
            'tickets_json': webtest.Upload('tickets.json', '{"key": "val"}'),
            'mount_label': 'mylabel',
            'mount_point': 'mymount',
        }
        r = self.app.post('/p/test/admin/bugs/_importer/create', params,
                          status=302)
        self.assertEqual(r.location, 'http://localhost/p/test/admin/')
        # the upload is persisted and the async import task is queued with
        # the submitted mount parameters
        sui.assert_called_once_with(project, 'tickets.json', '{"key": "val"}')
        self.assertEqual(
            u'mymount', import_tool.post.call_args[1]['mount_point'])
        self.assertEqual(
            u'mylabel', import_tool.post.call_args[1]['mount_label'])
    @with_tracker
    @mock.patch('forgeimporters.forge.tracker.save_importer_upload')
    @mock.patch('forgeimporters.base.import_tool')
    def test_create_limit(self, import_tool, sui):
        # with an import already pending, a new request must be refused and
        # no task queued
        project = M.Project.query.get(shortname='test')
        project.set_tool_data('ForgeTrackerImporter', pending=1)
        ThreadLocalORMSession.flush_all()
        params = {
            'tickets_json': webtest.Upload('tickets.json', '{"key": "val"}'),
            'mount_label': 'mylabel',
            'mount_point': 'mymount',
        }
        r = self.app.post('/p/test/admin/bugs/_importer/create', params,
                          status=302).follow()
        self.assertIn('Please wait and try again', r)
        self.assertEqual(import_tool.post.call_count, 0)
|
|
import pytest
import os

# Skip this whole module at collection time unless asyncio testing is enabled.
# BUG FIX: pytest.skip() called at module level raises a collection/usage
# error unless allow_module_level=True is passed; the original
# `raise pytest.skip(...)` form did not do that (and the `raise` was
# redundant anyway, since pytest.skip() raises internally).
if not os.environ.get('USE_ASYNCIO', False):
    pytest.skip("Only for asyncio", allow_module_level=True)
from unittest import TestCase, main
try:
from unittest.mock import Mock, call
except ImportError:
from mock import Mock, call
from autobahn.asyncio.rawsocket import PrefixProtocol, RawSocketClientProtocol, RawSocketServerProtocol, \
WampRawSocketClientFactory, WampRawSocketServerFactory
from autobahn.asyncio.util import get_serializers
from autobahn.wamp import message
class Test(TestCase):
    """Unit tests for the asyncio rawsocket transport helpers.

    Transports and session callbacks are replaced by mocks, so the
    protocols are exercised without any real networking.
    """

    def test_sers(self):
        """At least one serializer is available and can serialize a message."""
        serializers = get_serializers()
        self.assertTrue(len(serializers) > 0)
        m = serializers[0]().serialize(message.Abort(u'close'))
        print(m)
        self.assertTrue(m)

    def test_prefix(self):
        """Length-prefixed framing: whole, fragmented and back-to-back frames."""
        p = PrefixProtocol()
        transport = Mock()
        receiver = Mock()
        p.stringReceived = receiver
        p.connection_made(transport)
        # a complete frame is delivered at once and the buffer is drained
        small_msg = b'\x00\x00\x00\x04abcd'
        p.data_received(small_msg)
        receiver.assert_called_once_with(b'abcd')
        self.assertEqual(len(p._buffer), 0)
        # sendString writes the 4-byte big-endian length prefix, then payload
        p.sendString(b'abcd')
        transport.write.assert_has_calls([call(b'\x00\x00\x00\x04'), call(b'abcd')])
        transport.reset_mock()
        receiver.reset_mock()
        # a frame arriving in arbitrary fragments is only surfaced once complete
        big_msg = b'\x00\x00\x00\x0C' + b'0123456789AB'
        p.data_received(big_msg[0:2])
        self.assertFalse(receiver.called)
        p.data_received(big_msg[2:6])
        self.assertFalse(receiver.called)
        p.data_received(big_msg[6:11])
        self.assertFalse(receiver.called)
        p.data_received(big_msg[11:16])
        receiver.assert_called_once_with(b'0123456789AB')
        transport.reset_mock()
        receiver.reset_mock()
        # two whole frames plus a trailing partial byte: both delivered,
        # the leftover stays buffered
        two_messages = b'\x00\x00\x00\x04' + b'abcd' + b'\x00\x00\x00\x05' + b'12345' + b'\x00'
        p.data_received(two_messages)
        receiver.assert_has_calls([call(b'abcd'), call(b'12345')])
        self.assertEqual(p._buffer, b'\x00')

    def test_is_closed(self):
        """The is_closed future tracks the transport lifecycle."""
        class CP(RawSocketClientProtocol):
            @property
            def serializer_id(self):
                return 1
        client = CP()
        on_hs = Mock()
        transport = Mock()
        receiver = Mock()
        client.stringReceived = receiver
        client._on_handshake_complete = on_hs
        # before any connection the protocol reports itself closed
        self.assertTrue(client.is_closed.done())
        client.connection_made(transport)
        self.assertFalse(client.is_closed.done())
        client.connection_lost(None)
        self.assertTrue(client.is_closed.done())

    def test_raw_socket_server1(self):
        """Server handshake: accept serializer 1 and deliver the first frame."""
        server = RawSocketServerProtocol(max_size=10000)
        ser = Mock(return_value=True)
        on_hs = Mock()
        transport = Mock()
        receiver = Mock()
        server.supports_serializer = ser
        server.stringReceived = receiver
        server._on_handshake_complete = on_hs
        server.connection_made(transport)
        # client handshake (serializer 1) immediately followed by one frame
        hs = b'\x7F\xF1\x00\x00' + b'\x00\x00\x00\x04abcd'
        server.data_received(hs)
        ser.assert_called_once_with(1)
        on_hs.assert_called_once_with()
        self.assertTrue(transport.write.called)
        # server replies with its own handshake and keeps the connection open
        transport.write.assert_called_once_with(b'\x7F\x51\x00\x00')
        self.assertFalse(transport.close.called)
        receiver.assert_called_once_with(b'abcd')

    def test_raw_socket_server_errors(self):
        """Server drops garbage handshakes and unsupported serializers."""
        # not a rawsocket magic octet: connection must be closed
        server = RawSocketServerProtocol(max_size=10000)
        ser = Mock(return_value=True)
        on_hs = Mock()
        transport = Mock()
        receiver = Mock()
        server.supports_serializer = ser
        server.stringReceived = receiver
        server._on_handshake_complete = on_hs
        server.connection_made(transport)
        server.data_received(b'abcdef')
        transport.close.assert_called_once_with()
        # unsupported serializer: an error handshake is written back, then close
        server = RawSocketServerProtocol(max_size=10000)
        ser = Mock(return_value=False)
        on_hs = Mock()
        transport = Mock(spec_set=('close', 'write', 'get_extra_info'))
        receiver = Mock()
        server.supports_serializer = ser
        server.stringReceived = receiver
        server._on_handshake_complete = on_hs
        server.connection_made(transport)
        server.data_received(b'\x7F\xF1\x00\x00')
        transport.close.assert_called_once_with()
        transport.write.assert_called_once_with(b'\x7F\x10\x00\x00')

    def test_raw_socket_client1(self):
        """Client handshake round-trip followed by first frame delivery."""
        class CP(RawSocketClientProtocol):
            @property
            def serializer_id(self):
                return 1
        client = CP()
        on_hs = Mock()
        transport = Mock()
        receiver = Mock()
        client.stringReceived = receiver
        client._on_handshake_complete = on_hs
        client.connection_made(transport)
        # server echoes the handshake, then sends one length-prefixed frame
        client.data_received(b'\x7F\xF1\x00\x00' + b'\x00\x00\x00\x04abcd')
        on_hs.assert_called_once_with()
        self.assertTrue(transport.write.called)
        # BUG FIX: the original called the non-existent Mock method
        # 'called_one_with', which silently created a child mock and
        # asserted nothing; use the real assertion instead.
        transport.write.assert_called_once_with(b'\x7F\xF1\x00\x00')
        self.assertFalse(transport.close.called)
        receiver.assert_called_once_with(b'abcd')

    def test_raw_socket_client_error(self):
        """A handshake naming an unexpected serializer closes the connection."""
        class CP(RawSocketClientProtocol):
            @property
            def serializer_id(self):
                return 1
        client = CP()
        on_hs = Mock()
        transport = Mock(spec_set=('close', 'write', 'get_extra_info'))
        receiver = Mock()
        client.stringReceived = receiver
        client._on_handshake_complete = on_hs
        client.connection_made(transport)
        # serializer 1 was requested, server answers with 1 but a bad reply code
        client.data_received(b'\x7F\xF1\x00\x01')
        transport.close.assert_called_once_with()

    def test_wamp(self):
        """End-to-end WAMP-over-rawsocket through the client/server factories."""
        transport = Mock(spec_set=('abort', 'close', 'write', 'get_extra_info'))
        transport.write = Mock(side_effect=lambda m: messages.append(m))
        client = Mock(spec=['onOpen', 'onMessage'])

        def fact():
            return client
        messages = []
        proto = WampRawSocketClientFactory(fact)()
        proto.connection_made(transport)
        self.assertTrue(proto._serializer)
        s = proto._serializer.RAWSOCKET_SERIALIZER_ID
        # complete the handshake with the serializer the client picked
        proto.data_received(bytes(bytearray([0x7F, 0xF0 | s, 0, 0])))
        client.onOpen.assert_called_once_with(proto)
        # loop the serialized Abort message back (messages[0] is the handshake)
        proto.send(message.Abort(u'close'))
        for d in messages[1:]:
            proto.data_received(d)
        self.assertTrue(client.onMessage.called)
        self.assertTrue(isinstance(client.onMessage.call_args[0][0], message.Abort))
        # server side: same loop-back exercise through the server factory
        transport = Mock(spec_set=('abort', 'close', 'write', 'get_extra_info'))
        transport.write = Mock(side_effect=lambda m: messages.append(m))
        client = None
        server = Mock(spec=['onOpen', 'onMessage'])

        def fact_server():
            return server
        messages = []
        proto = WampRawSocketServerFactory(fact_server)()
        proto.connection_made(transport)
        self.assertTrue(proto.factory._serializers)
        s = proto.factory._serializers[1].RAWSOCKET_SERIALIZER_ID
        proto.data_received(bytes(bytearray([0x7F, 0xF0 | s, 0, 0])))
        self.assertTrue(proto._serializer)
        server.onOpen.assert_called_once_with(proto)
        proto.send(message.Abort(u'close'))
        for d in messages[1:]:
            proto.data_received(d)
        self.assertTrue(server.onMessage.called)
        self.assertTrue(isinstance(server.onMessage.call_args[0][0], message.Abort))
if __name__ == "__main__":
    # Run the suite directly (handy outside of pytest); uncomment to run a
    # single test:
    # import sys;sys.argv = ['', 'Test.test_prefix']
    main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
from socket import gethostname
from supervisor.process import Subprocess
from supervisor.states import SupervisorStates
from unittest.mock import call, patch, Mock
from supvisors.supervisordata import *
@pytest.fixture
def source(supervisor, supvisors):
    """ Return the SupervisorData instance under test. """
    # wire the Supvisors facade onto the dummy supervisord before wrapping it
    supervisor.supvisors = supvisors
    data = SupervisorData(supvisors, supervisor)
    return data
def test_unix_server(mocker, supervisor, supvisors):
    """ Test that using UNIX HTTP server is not compliant with the use of Supvisors. """
    # turn the first server section into a unix_http_server: construction must fail
    server_section = supervisor.options.server_configs[0]
    mocker.patch.dict(server_section, {'section': 'unix_http_server'})
    with pytest.raises(ValueError):
        SupervisorData(supvisors, supervisor)
def test_creation(supervisor, source):
    """ Test the values set at construction. """
    # the wrapper keeps direct references to supervisord and its HTTP config
    assert source.supervisord is supervisor
    assert source.server_config is supervisor.options.server_configs[0]
    # RPC interfaces are resolved lazily, and disabilities start empty
    assert source._supervisor_rpc_interface is None
    assert source._supvisors_rpc_interface is None
    assert source.disabilities == {}
def test_accessors(source):
    """ Test the accessors. """
    # test consistence with DummySupervisor configuration
    # (all expected values below come from the dummy fixture configuration)
    assert source.httpserver is source.supervisord.options.httpserver
    assert source.supervisor_rpc_interface.rpc_name == 'supervisor_RPC'
    assert source.supvisors_rpc_interface.rpc_name == 'supvisors_RPC'
    assert source.serverurl == f'http://{gethostname()}:65000'
    assert source.serverport == 65000
    assert source.username == 'user'
    assert source.password == 'p@$$w0rd'
    assert source.supervisor_state == SupervisorStates.RUNNING
def test_env(source):
    """ Test the environment build. """
    # the environment passed to spawned processes points back at supervisord
    expected = {'SUPERVISOR_SERVER_URL': f'http://{gethostname()}:65000',
                'SUPERVISOR_USERNAME': 'user',
                'SUPERVISOR_PASSWORD': 'p@$$w0rd'}
    assert source.get_env() == expected
def test_update_supervisor(mocker, source):
    """ Test the update_supervisor method. """
    # update_supervisor must both swap the HTTP handler and refresh internal data
    patched = {name: mocker.patch.object(source, name)
               for name in ('replace_default_handler', 'update_internal_data')}
    source.update_supervisor()
    for mocked in patched.values():
        assert mocked.called
def test_close_server(source):
    """ Test the closing of supervisord HTTP servers. """
    # keep reference to http servers
    http_servers = source.supervisord.options.httpservers
    assert source.supervisord.options.storage is None
    # call the method
    source.close_httpservers()
    # test the result: the servers are moved aside into 'storage'
    # (not lost) and the active tuple is emptied
    assert source.supervisord.options.storage is not None
    assert source.supervisord.options.storage is http_servers
    assert source.supervisord.options.httpservers == ()
def test_process(source):
    """ Test the access of a supervisord process. """
    # unknown application / process names raise KeyError
    for namespec in ('unknown_application:unknown_process',
                     'dummy_application:unknown_process'):
        with pytest.raises(KeyError):
            source._get_process(namespec)
    # valid namespecs resolve to the exact process objects held by the group
    app_config = source.supervisord.process_groups['dummy_application']
    for process_name in ('dummy_process_1', 'dummy_process_2'):
        namespec = f'dummy_application:{process_name}'
        assert source._get_process(namespec) is app_config.processes[process_name]
def test_process_config(source):
    """ Test the access of a group configuration. """
    # unknown application / process names raise KeyError
    for namespec in ('unknown_application:unknown_process',
                     'dummy_application:unknown_process'):
        with pytest.raises(KeyError):
            source._get_process_config(namespec)
    # each known process exposes its own autorestart flag and command line
    expectations = [('dummy_process_1', True, 'ls'),
                    ('dummy_process_2', False, 'cat')]
    for process_name, autorestart, command in expectations:
        config = source._get_process_config(f'dummy_application:{process_name}')
        assert bool(config.autorestart) is autorestart
        assert config.command == command
def test_autorestart(source):
    """ Test the autostart value of a process configuration. """
    # unknown application / process names raise KeyError
    for namespec in ('unknown_application:unknown_process',
                     'dummy_application:unknown_process'):
        with pytest.raises(KeyError):
            source.autorestart(namespec)
    # known processes report their configured autorestart value
    assert source.autorestart('dummy_application:dummy_process_1')
    assert not source.autorestart('dummy_application:dummy_process_2')
def test_disable_autorestart(source):
    """ Test the disable of the autostart of a process configuration. """
    # unknown application / process names raise KeyError
    for namespec in ('unknown_application:unknown_process',
                     'dummy_application:unknown_process'):
        with pytest.raises(KeyError):
            source.disable_autorestart(namespec)
    # a process configured to autorestart must not do so anymore once disabled
    namespec = 'dummy_application:dummy_process_1'
    assert source.autorestart(namespec)
    source.disable_autorestart(namespec)
    assert not source.autorestart(namespec)
def test_extra_args(source):
    """ Test the extra arguments functionality.

    update_internal_data adds command_ref / extra_args attributes to the
    process configs; update_extra_args then rebuilds the command line from
    command_ref + the extra arguments.
    """
    # test initial status
    assert not any(hasattr(process.config, 'command_ref') or hasattr(process.config, 'extra_args')
                   for appli in source.supervisord.process_groups.values()
                   for process in appli.processes.values())
    # add context to one group of the internal data
    source.supvisors.server_options.processes_program = {'dummy_process_1': 'dummy_process',
                                                         'dummy_process_2': 'dummy_process'}
    source.update_internal_data('dummy_application')
    # test internal data: 'dummy_application' processes should have additional attributes
    assert all(hasattr(process.config, 'command_ref') and hasattr(process.config, 'extra_args')
               for process in source.supervisord.process_groups['dummy_application'].processes.values())
    # add context to internal data
    source.update_internal_data()
    # test internal data: all should have additional attributes
    assert all(hasattr(process.config, 'command_ref') and hasattr(process.config, 'extra_args')
               for appli in source.supervisord.process_groups.values()
               for process in appli.processes.values())
    # test unknown application and process
    with pytest.raises(KeyError):
        source.update_extra_args('unknown_application:unknown_process', '-la')
    with pytest.raises(KeyError):
        source.update_extra_args('dummy_application:unknown_process', '-la')
    # test normal behaviour
    namespec = 'dummy_application:dummy_process_1'
    config = source._get_process_config(namespec)
    # add extra arguments
    source.update_extra_args(namespec, '-la')
    # test access
    assert source.get_process_config_options(namespec, ['extra_args']) == {'extra_args': '-la'}
    # test internal data: command line rebuilt, reference untouched
    assert config.command == 'ls -la'
    assert config.command_ref == 'ls'
    assert config.extra_args == '-la'
    # remove them
    source.update_extra_args(namespec, '')
    # test access
    assert source.get_process_config_options(namespec, ['extra_args']) == {'extra_args': ''}
    # test internal data: command line restored to its reference
    assert config.command == 'ls'
    assert config.command_ref == 'ls'
    assert config.extra_args == ''
def test_update_numprocs(mocker, source):
    """ Test the possibility to update numprocs.

    update_numprocs returns (added, obsolete) namespec lists and delegates
    to _add_processes / _get_obsolete_processes depending on direction.
    """
    # get patches
    mocked_obsolete = mocker.patch.object(source, '_get_obsolete_processes', return_value=['dummy_program_2'])
    mocked_add = mocker.patch.object(source, '_add_processes', return_value=['dummy_program_3'])
    # set context
    source.supervisord.supvisors.server_options.program_processes = program_processes = {}
    program_processes['dummy_program'] = {'dummy_group': ['dummy_program_1', 'dummy_program_2']}
    # test numprocs increase: only _add_processes is involved
    assert source.update_numprocs('dummy_program', 3) == (['dummy_program_3'], [])
    assert not mocked_obsolete.called
    assert mocked_add.call_args_list == [call('dummy_program', 3, 2, ['dummy_group'])]
    mocker.resetall()
    # test numprocs decrease: only _get_obsolete_processes is involved
    assert source.update_numprocs('dummy_program', 1) == ([], ['dummy_program_2'])
    assert mocked_obsolete.call_args_list == [call('dummy_program', 1,
                                                   {'dummy_group': ['dummy_program_1', 'dummy_program_2']})]
    assert not mocked_add.called
    mocker.resetall()
    # test numprocs identity: nothing to do
    source.update_numprocs('dummy_program', 2)
    assert not mocked_obsolete.called
    assert not mocked_add.called
def test_add_processes(mocker, source):
    """ Test the possibility to increase numprocs.

    _add_processes must push the new numprocs value to the server options,
    reload the program section, and only hand the NEW process configs
    (beyond the current count) to _add_supervisor_processes.
    """
    # get the patches
    mocked_update = source.supervisord.supvisors.server_options.update_numprocs
    mocked_update.return_value = 'program:dummy_program'
    mocked_reload = source.supervisord.supvisors.server_options.reload_processes_from_section
    process_1, process_2 = Mock(), Mock()
    mocked_reload.return_value = [process_1, process_2]
    expected = ['dummy_group:dummy_program_01', 'dummy_group:dummy_program_02']
    mocked_add = mocker.patch.object(source, '_add_supervisor_processes', return_value=expected)
    # test call: going from 1 to 2 processes, so only process_2 is new
    assert source._add_processes('dummy_program', 2, 1, ['dummy_group']) == expected
    assert mocked_update.call_args_list == [call('dummy_program', 2)]
    assert mocked_reload.call_args_list == [call('program:dummy_program', 'dummy_group')]
    assert mocked_add.call_args_list == [call('dummy_program', 'dummy_group', [process_2])]
def test_add_supervisor_processes(mocker, source):
    """ Test the possibility to increase numprocs.

    _add_supervisor_processes must wire the new config into supervisord
    (options, command_ref/extra_args, disabled flag, autochild logs),
    register the created process in the group and notify a ProcessAddedEvent.
    """
    # get the patches
    mocked_notify = mocker.patch('supvisors.supervisordata.notify')
    # set context
    process_1, process_2 = Mock(), Mock()
    program_2 = Mock(command='bin/program_2', **{'make_process.return_value': process_2})
    program_2.name = 'dummy_program_02'
    source.supervisord.process_groups = {'dummy_group': Mock(processes={'dummy_program_01': process_1},
                                                             config=Mock(process_configs=[process_1]))}
    # the program is currently disabled, so the new process must be too
    source.disabilities['dummy_program'] = True
    # test call
    expected = ['dummy_group:dummy_program_02']
    assert source._add_supervisor_processes('dummy_program', 'dummy_group', [program_2]) == expected
    assert program_2.options == source.supervisord.options
    assert program_2.command_ref == 'bin/program_2'
    assert program_2.extra_args == ''
    assert program_2.disabled
    assert program_2.create_autochildlogs.call_args_list == [call()]
    assert source.supervisord.process_groups['dummy_group'].processes == {'dummy_program_01': process_1,
                                                                          'dummy_program_02': process_2}
    # a ProcessAddedEvent must have been notified for the new process
    notify_call = mocked_notify.call_args_list[0][0][0]
    assert isinstance(notify_call, ProcessAddedEvent)
    assert notify_call.process is process_2
def test_get_obsolete_processes(source):
    """ Test getting the obsolete processes before decreasing numprocs.

    Only the processes beyond the new numprocs value are reported; the new
    value is pushed to the server options and the section reloaded.
    """
    mocked_update = source.supervisord.supvisors.server_options.update_numprocs
    mocked_update.return_value = 'program:dummy_program'
    mocked_reload = source.supervisord.supvisors.server_options.reload_processes_from_section
    # set context
    process_1, process_2 = Mock(), Mock()
    process_1.name = 'dummy_process_1'
    process_2.name = 'dummy_process_2'
    program_configs = {'dummy_group': [process_1, process_2]}
    # test call: decreasing to 1 makes the second process obsolete
    assert source._get_obsolete_processes('dummy_program', 1, program_configs) == ['dummy_group:dummy_process_2']
    assert mocked_update.call_args_list == [call('dummy_program', 1)]
    assert mocked_reload.call_args_list == [call('program:dummy_program', 'dummy_group')]
def test_delete_processes(mocker, source):
    """ Test the possibility to decrease numprocs. """
    mocked_notify = mocker.patch('supvisors.supervisordata.notify')
    # set context: one group holding three processes
    kept, removed_1, removed_2 = Mock(), Mock(), Mock()
    group = Mock(processes={'dummy_program_01': kept,
                            'dummy_program_02': removed_1,
                            'dummy_program_03': removed_2})
    source.supervisord.process_groups = {'dummy_group': group}
    # deleting two namespecs must notify one ProcessRemovedEvent per process
    source.delete_processes(['dummy_group:dummy_program_02', 'dummy_group:dummy_program_03'])
    for index, expected_process in enumerate([removed_1, removed_2]):
        event = mocked_notify.call_args_list[index][0][0]
        assert isinstance(event, ProcessRemovedEvent)
        assert event.process is expected_process
    # only the first process is left in the group
    assert source.supervisord.process_groups['dummy_group'].processes == {'dummy_program_01': kept}
def test_force_fatal(source):
    """ Test the way to force a process in FATAL state. """
    # test unknown application and process
    with pytest.raises(KeyError):
        source.force_process_fatal('unknown_application:unknown_process', 'crash')
    with pytest.raises(KeyError):
        source.force_process_fatal('dummy_application:unknown_process', 'crash')
    # test normal behaviour: state switched to FATAL and reason stored
    process_1 = source._get_process('dummy_application:dummy_process_1')
    assert process_1.state == 'STOPPED'
    assert process_1.spawnerr == ''
    source.force_process_fatal('dummy_application:dummy_process_1', 'crash')
    assert process_1.state == 'FATAL'
    assert process_1.spawnerr == 'crash'
    # restore configuration (the fixture process is shared with other tests)
    process_1.state = 'STOPPED'
    process_1.spawnerr = ''
def test_replace_handler(source):
    """ Test the replacement of the default HTTP handler, with and without authentication. """
    # initially the dummy server holds a plain Mock in place of the handler
    assert isinstance(source.supervisord.options.httpserver.handlers[1], Mock)
    # with a username configured, an authenticated handler is installed
    source.replace_default_handler()
    assert isinstance(source.supervisord.options.httpserver.handlers[1], supervisor_auth_handler)
    # without a username, the plain default handler is installed
    with patch.dict(source.server_config, {'username': None}):
        source.replace_default_handler()
    assert isinstance(source.supervisord.options.httpserver.handlers[1], default_handler.default_handler)
def test_get_subprocesses(source):
    """ Test the get_subprocesses method. """
    # set context: one program declined into two processes of the same group
    programs = []
    for index in (1, 2):
        program = Mock()
        program.name = f'dummy_program_{index}'
        programs.append(program)
    server_options = source.supervisord.supvisors.server_options
    server_options.program_processes = {'dummy_program': {'dummy_group': programs}}
    # the namespecs of all declined processes are returned
    assert source.get_subprocesses('dummy_program') == ['dummy_group:dummy_program_1',
                                                        'dummy_group:dummy_program_2']
def test_enable_disable(mocker, source):
    """ Test the disabling / enabling of a program.

    Enabling / disabling must update the 'disabled' flag on every process
    of the program, persist the disabilities map, and notify one
    ProcessEnabledEvent / ProcessDisabledEvent per impacted process.
    """
    mocked_notify = mocker.patch('supvisors.supervisordata.notify')
    mocked_write = mocker.patch.object(source, 'write_disabilities')
    # test initial status
    assert not any(hasattr(process.config, 'disabled')
                   for appli in source.supervisord.process_groups.values()
                   for process in appli.processes.values())
    # add context to one group of the internal data
    source.supvisors.server_options.processes_program = {'dummy_process_1': 'dummy_process',
                                                         'dummy_process_2': 'dummy_process'}
    source.update_internal_data('dummy_application')
    # test internal data: 'dummy_application' processes should have additional attributes
    assert all(not process.config.disabled
               for process in source.supervisord.process_groups['dummy_application'].processes.values())
    # add context to internal data
    source.update_internal_data()
    # test internal data: all should have additional attributes
    assert all(not process.config.disabled
               for appli in source.supervisord.process_groups.values()
               for process in appli.processes.values())
    # test unknown program: disabilities updated and persisted, but no
    # process touched and no event notified
    source.enable_program('unknown_program')
    assert not mocked_notify.called
    assert mocked_write.called
    mocker.resetall()
    assert source.disabilities == {'dummy_process': False, 'unknown_program': False}
    assert all(not process.config.disabled
               for appli in source.supervisord.process_groups.values()
               for process in appli.processes.values())
    source.disable_program('unknown_program')
    assert not mocked_notify.called
    assert mocked_write.called
    mocker.resetall()
    assert source.disabilities == {'dummy_process': False, 'unknown_program': True}
    assert all(not process.config.disabled
               for appli in source.supervisord.process_groups.values()
               for process in appli.processes.values())
    # test known program: one ProcessDisabledEvent per declined process
    source.disable_program('dummy_process')
    expected = list(source.supvisors.server_options.processes_program.keys())
    notify_call_1 = mocked_notify.call_args_list[0][0][0]
    assert isinstance(notify_call_1, ProcessDisabledEvent)
    assert notify_call_1.process.config.name in expected
    expected.remove(notify_call_1.process.config.name)
    notify_call_2 = mocked_notify.call_args_list[1][0][0]
    assert isinstance(notify_call_2, ProcessDisabledEvent)
    assert notify_call_2.process.config.name in expected
    assert mocked_write.called
    mocker.resetall()
    assert source.disabilities == {'dummy_process': True, 'unknown_program': True}
    assert all(process.config.disabled
               for process in source.supervisord.process_groups['dummy_application'].processes.values())
    # re-enabling notifies ProcessEnabledEvent and resets the start context
    source.enable_program('dummy_process')
    expected = list(source.supvisors.server_options.processes_program.keys())
    notify_call_1 = mocked_notify.call_args_list[0][0][0]
    assert isinstance(notify_call_1, ProcessEnabledEvent)
    assert notify_call_1.process.config.name in expected
    expected.remove(notify_call_1.process.config.name)
    notify_call_2 = mocked_notify.call_args_list[1][0][0]
    assert isinstance(notify_call_2, ProcessEnabledEvent)
    assert notify_call_2.process.config.name in expected
    assert mocked_write.called
    mocker.resetall()
    assert source.disabilities == {'dummy_process': False, 'unknown_program': True}
    assert all(not process.config.disabled and process.laststart == 0 and process.state == 'STARTING'
               for process in source.supervisord.process_groups['dummy_application'].processes.values())
def test_disabilities_serialization(mocker, source):
    """ Test the serialization of the disabilities.

    The disabilities map is written to / read from the configured JSON file;
    reading is a no-op when no disabilities file is configured.
    """
    # patch open
    mocked_open = mocker.patch('builtins.open', mocker.mock_open())
    # read_disabilities has already been called once in the constructor based on a non-existing file
    assert source.disabilities == {}
    # fill context and write
    source.disabilities['program_1'] = True
    source.disabilities['program_2'] = False
    source.write_disabilities()
    mocked_open.assert_called_once_with(source.supvisors.options.disabilities_file, 'w+')
    handle = mocked_open()
    json_expected = '{"program_1": true, "program_2": false}'
    assert handle.write.call_args_list == [call(json_expected)]
    # empty context and read back the same JSON payload
    mocked_open = mocker.patch('builtins.open', mocker.mock_open(read_data=json_expected))
    mocker.patch('os.path.isfile', return_value=True)
    source.disabilities = {}
    source.read_disabilities()
    assert source.disabilities == {'program_1': True, 'program_2': False}
    mocked_open.assert_called_once_with(source.supvisors.options.disabilities_file)
    handle = mocked_open()
    assert handle.read.call_args_list == [call()]
    # test with disabilities files not set: nothing is read
    source.supvisors.options.disabilities_file = None
    source.disabilities = {}
    source.read_disabilities()
    assert source.disabilities == {}
def test_spawn(mocker):
    """ Test the spawn method.
    This method is designed to be added to Supervisor by monkeypatch. """
    # NOTE(review): this swap patches Subprocess globally and is never
    # restored, so it leaks into any test run after this one — confirm
    # whether that is intended or should be undone in a finally/fixture.
    Subprocess._spawn, Subprocess.spawn = Subprocess.spawn, spawn
    # create a disabled process
    process = Subprocess(Mock(disabled=True))
    # patch the legacy Subprocess
    mocked_spawn = mocker.patch.object(process, '_spawn', return_value='spawned')
    # check that spawn does not work while the process is disabled
    assert process.spawn() is None
    assert not mocked_spawn.called
    # enable the process
    process.config.disabled = False
    # check that spawn now delegates to the legacy implementation
    assert process.spawn() == 'spawned'
    assert mocked_spawn.called
|
|
from __future__ import absolute_import, unicode_literals
import copy
import json
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models.sql import Query
from django.db.models.sql.constants import SINGLE
from django.utils.crypto import get_random_string
from django.utils.six.moves.urllib.parse import urlparse
from elasticsearch import Elasticsearch, NotFoundError
from elasticsearch.helpers import bulk
from wagtail.utils.utils import deep_update
from wagtail.wagtailsearch.backends.base import (
BaseSearchBackend, BaseSearchQuery, BaseSearchResults)
from wagtail.wagtailsearch.index import (
FilterField, Indexed, RelatedFields, SearchField, class_is_indexed)
class ElasticsearchMapping(object):
    """Generates the Elasticsearch mapping definition for an Indexed model.

    Class attributes hold the ES-version-specific knobs so that subclasses
    targeting newer Elasticsearch releases can override them.
    """
    # Django model field internal type -> Elasticsearch core field type
    type_map = {
        'AutoField': 'integer',
        'BinaryField': 'binary',
        'BooleanField': 'boolean',
        'CharField': 'string',
        'CommaSeparatedIntegerField': 'string',
        'DateField': 'date',
        'DateTimeField': 'date',
        'DecimalField': 'double',
        'FileField': 'string',
        'FilePathField': 'string',
        'FloatField': 'double',
        'IntegerField': 'integer',
        'BigIntegerField': 'long',
        'IPAddressField': 'string',
        'GenericIPAddressField': 'string',
        'NullBooleanField': 'boolean',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer',
        'PositiveSmallIntegerField': 'integer',
        'SlugField': 'string',
        'SmallIntegerField': 'integer',
        'TextField': 'string',
        'TimeField': 'date',
    }
    # types used for exact-match and analyzed text values; both 'string'
    # here — presumably overridden (e.g. 'keyword'/'text') in ES5 subclasses
    keyword_type = 'string'
    text_type = 'string'
    # ES5 uses the 'keyword' type instead of index: not_analyzed, so
    # subclasses can turn this off (see the comments in get_field_mapping)
    set_index_not_analyzed_on_filter_fields = True
    # Contains the configuration required to use the edgengram_analyzer
    # on a field. It's different in Elasticsearch 2 so it's been put in
    # an attribute here to make it easier to override in a subclass.
    edgengram_analyzer_config = {
        'index_analyzer': 'edgengram_analyzer',
    }
    def __init__(self, model):
        """Build a mapping generator for the given indexed Django model."""
        self.model = model
def get_parent(self):
for base in self.model.__bases__:
if issubclass(base, Indexed) and issubclass(base, models.Model):
return type(self)(base)
    def get_document_type(self):
        """Return the Elasticsearch document type name for this model."""
        return self.model.indexed_get_content_type()
def get_field_column_name(self, field):
if isinstance(field, FilterField):
return field.get_attname(self.model) + '_filter'
elif isinstance(field, SearchField):
return field.get_attname(self.model)
elif isinstance(field, RelatedFields):
return field.field_name
def get_field_mapping(self, field):
if isinstance(field, RelatedFields):
mapping = {'type': 'nested', 'properties': {}}
nested_model = field.get_field(self.model).related_model
nested_mapping = type(self)(nested_model)
for sub_field in field.fields:
sub_field_name, sub_field_mapping = nested_mapping.get_field_mapping(sub_field)
mapping['properties'][sub_field_name] = sub_field_mapping
return self.get_field_column_name(field), mapping
else:
mapping = {'type': self.type_map.get(field.get_type(self.model), 'string')}
if isinstance(field, SearchField):
if mapping['type'] == 'string':
mapping['type'] = self.text_type
if field.boost:
mapping['boost'] = field.boost
if field.partial_match:
mapping.update(self.edgengram_analyzer_config)
mapping['include_in_all'] = True
elif isinstance(field, FilterField):
if mapping['type'] == 'string':
mapping['type'] = self.keyword_type
if self.set_index_not_analyzed_on_filter_fields:
# Not required on ES5 as that uses the "keyword" type for
# filtered string fields
mapping['index'] = 'not_analyzed'
mapping['include_in_all'] = False
if 'es_extra' in field.kwargs:
for key, value in field.kwargs['es_extra'].items():
mapping[key] = value
return self.get_field_column_name(field), mapping
def get_mapping(self):
# Make field list
fields = {
'pk': dict(type=self.keyword_type, store=True, include_in_all=False),
'content_type': dict(type=self.keyword_type, include_in_all=False),
'_partials': dict(type=self.text_type, include_in_all=False),
}
fields['_partials'].update(self.edgengram_analyzer_config)
if self.set_index_not_analyzed_on_filter_fields:
# Not required on ES5 as that uses the "keyword" type for
# filtered string fields
fields['pk']['index'] = 'not_analyzed'
fields['content_type']['index'] = 'not_analyzed'
fields.update(dict(
self.get_field_mapping(field) for field in self.model.get_search_fields()
))
return {
self.get_document_type(): {
'properties': fields,
}
}
def get_document_id(self, obj):
return obj.indexed_get_toplevel_content_type() + ':' + str(obj.pk)
def _get_nested_document(self, fields, obj):
doc = {}
partials = []
model = type(obj)
mapping = type(self)(model)
for field in fields:
value = field.get_value(obj)
doc[mapping.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
return doc, partials
def get_document(self, obj):
# Build document
doc = dict(pk=str(obj.pk), content_type=self.model.indexed_get_content_type())
partials = []
for field in self.model.get_search_fields():
value = field.get_value(obj)
if isinstance(field, RelatedFields):
if isinstance(value, models.Manager):
nested_docs = []
for nested_obj in value.all():
nested_doc, extra_partials = self._get_nested_document(field.fields, nested_obj)
nested_docs.append(nested_doc)
partials.extend(extra_partials)
value = nested_docs
elif isinstance(value, models.Model):
value, extra_partials = self._get_nested_document(field.fields, value)
partials.extend(extra_partials)
doc[self.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
# Add partials to document
doc['_partials'] = partials
return doc
def __repr__(self):
return '<ElasticsearchMapping: %s>' % (self.model.__name__, )
class ElasticsearchSearchQuery(BaseSearchQuery):
    """Compiles a Wagtail search query plus queryset filters into the legacy
    Elasticsearch 1.x query DSL ('filtered', 'missing', 'not'/'and' clauses,
    all removed in later ES versions; newer backends subclass and override).
    """
    mapping_class = ElasticsearchMapping
    DEFAULT_OPERATOR = 'or'
    def __init__(self, *args, **kwargs):
        super(ElasticsearchSearchQuery, self).__init__(*args, **kwargs)
        self.mapping = self.mapping_class(self.queryset.model)
        # Convert field names into index column names
        if self.fields:
            fields = []
            searchable_fields = {f.field_name: f for f in self.queryset.model.get_searchable_search_fields()}
            for field_name in self.fields:
                if field_name in searchable_fields:
                    field_name = self.mapping.get_field_column_name(searchable_fields[field_name])
                fields.append(field_name)
            self.fields = fields
    def _process_lookup(self, field, lookup, value):
        """Translate a single Django-style lookup into an ES filter clause.

        Implicitly returns None for lookups not handled here.
        """
        column_name = self.mapping.get_field_column_name(field)
        if lookup == 'exact':
            # exact=None means "field is missing" rather than a term match.
            if value is None:
                return {
                    'missing': {
                        'field': column_name,
                    }
                }
            else:
                return {
                    'term': {
                        column_name: value,
                    }
                }
        if lookup == 'isnull':
            if value:
                return {
                    'missing': {
                        'field': column_name,
                    }
                }
            else:
                return {
                    'exists': {
                        'field': column_name,
                    }
                }
        if lookup in ['startswith', 'prefix']:
            return {
                'prefix': {
                    column_name: value,
                }
            }
        if lookup in ['gt', 'gte', 'lt', 'lte']:
            return {
                'range': {
                    column_name: {
                        lookup: value,
                    }
                }
            }
        if lookup == 'range':
            lower, upper = value
            return {
                'range': {
                    column_name: {
                        'gte': lower,
                        'lte': upper,
                    }
                }
            }
        if lookup == 'in':
            # A queryset on the right-hand side arrives as a compiled Query;
            # execute it and use the resulting values as the term list.
            if isinstance(value, Query):
                db_alias = self.queryset._db or DEFAULT_DB_ALIAS
                value = (value.get_compiler(db_alias)
                         .execute_sql(result_type=SINGLE))
            elif not isinstance(value, list):
                value = list(value)
            return {
                'terms': {
                    column_name: value,
                }
            }
    def _connect_filters(self, filters, connector, negated):
        """Combine filter clauses with 'and'/'or' and optionally negate.

        Implicitly returns None when ``filters`` is empty.
        """
        if filters:
            if len(filters) == 1:
                filter_out = filters[0]
            else:
                filter_out = {
                    connector.lower(): [
                        fil for fil in filters if fil is not None
                    ]
                }
            if negated:
                filter_out = {
                    'not': filter_out
                }
            return filter_out
    def get_inner_query(self):
        """Build the scoring part of the query from the search terms."""
        if self.query_string is not None:
            # Default to everything ('_all') plus the partial-match bucket.
            fields = self.fields or ['_all', '_partials']
            if len(fields) == 1:
                if self.operator == 'or':
                    query = {
                        'match': {
                            fields[0]: self.query_string,
                        }
                    }
                else:
                    query = {
                        'match': {
                            fields[0]: {
                                'query': self.query_string,
                                'operator': self.operator,
                            }
                        }
                    }
            else:
                query = {
                    'multi_match': {
                        'query': self.query_string,
                        'fields': fields,
                    }
                }
                if self.operator != 'or':
                    query['multi_match']['operator'] = self.operator
        else:
            # A None query string means "match everything".
            query = {
                'match_all': {}
            }
        return query
    def get_content_type_filter(self):
        # Prefix-match so subclasses of the queried model are included too.
        return {
            'prefix': {
                'content_type': self.queryset.model.indexed_get_content_type()
            }
        }
    def get_filters(self):
        filters = []
        # Filter by content type
        filters.append(self.get_content_type_filter())
        # Apply filters from queryset
        queryset_filters = self._get_filters_from_queryset()
        if queryset_filters:
            filters.append(queryset_filters)
        return filters
    def get_query(self):
        """Wrap the inner query and filters in an ES1 'filtered' query."""
        inner_query = self.get_inner_query()
        filters = self.get_filters()
        if len(filters) == 1:
            return {
                'filtered': {
                    'query': inner_query,
                    'filter': filters[0],
                }
            }
        elif len(filters) > 1:
            return {
                'filtered': {
                    'query': inner_query,
                    'filter': {
                        'and': filters,
                    }
                }
            }
        else:
            return inner_query
    def get_sort(self):
        """Return the ES sort spec, or None to sort by relevance."""
        # Ordering by relevance is the default in Elasticsearch
        if self.order_by_relevance:
            return
        # Get queryset and make sure its ordered
        if self.queryset.ordered:
            order_by_fields = self.queryset.query.order_by
            sort = []
            for order_by_field in order_by_fields:
                reverse = False
                field_name = order_by_field
                # A leading '-' marks a descending sort, Django-style.
                if order_by_field.startswith('-'):
                    reverse = True
                    field_name = order_by_field[1:]
                field = self._get_filterable_field(field_name)
                column_name = self.mapping.get_field_column_name(field)
                sort.append({
                    column_name: 'desc' if reverse else 'asc'
                })
            return sort
        else:
            # Order by pk field
            return ['pk']
    def __repr__(self):
        return json.dumps(self.get_query())
class ElasticsearchSearchResults(BaseSearchResults):
    """Executes a compiled query against Elasticsearch and maps the hits back
    to Django model instances, preserving the order ES returned them in.
    """
    # Request parameter used to restrict returned stored fields; ES1 calls it
    # 'fields' (renamed in later versions, hence the override hook).
    fields_param_name = 'fields'
    def _get_es_body(self, for_count=False):
        body = {
            'query': self.query.get_query()
        }
        if not for_count:
            # Sorting is irrelevant for counts, so only searches get it.
            sort = self.query.get_sort()
            if sort is not None:
                body['sort'] = sort
        return body
    def _do_search(self):
        """Run the search and return model instances in ES result order."""
        # Params for elasticsearch query
        params = dict(
            index=self.backend.get_index_for_model(self.query.queryset.model).name,
            body=self._get_es_body(),
            _source=False,
            from_=self.start,
        )
        # Only the stored 'pk' field is needed; the objects themselves are
        # re-fetched from the database below.
        params[self.fields_param_name] = 'pk'
        # Add size if set
        if self.stop is not None:
            params['size'] = self.stop - self.start
        # Send to Elasticsearch
        hits = self.backend.es.search(**params)
        # Get pks from results
        pks = [hit['fields']['pk'][0] for hit in hits['hits']['hits']]
        scores = {str(hit['fields']['pk'][0]): hit['_score'] for hit in hits['hits']['hits']}
        # Initialise results dictionary
        results = dict((str(pk), None) for pk in pks)
        # Find objects in database and add them to dict
        queryset = self.query.queryset.filter(pk__in=pks)
        for obj in queryset:
            results[str(obj.pk)] = obj
            if self._score_field:
                # Annotate each object with its ES relevance score.
                setattr(obj, self._score_field, scores.get(str(obj.pk)))
        # Return results in order given by Elasticsearch; hits whose object
        # has vanished from the database are silently dropped.
        return [results[str(pk)] for pk in pks if results[str(pk)]]
    def _do_count(self):
        """Return the hit count, adjusted for the slice start/stop limits."""
        # Get count
        hit_count = self.backend.es.count(
            index=self.backend.get_index_for_model(self.query.queryset.model).name,
            body=self._get_es_body(for_count=True),
        )['count']
        # Add limits
        hit_count -= self.start
        if self.stop is not None:
            hit_count = min(hit_count, self.stop - self.start)
        # Never report a negative count for an out-of-range slice.
        return max(hit_count, 0)
class ElasticsearchIndex(object):
    """Thin wrapper around a single Elasticsearch index (or alias) that can
    create/delete the index and add/remove documents for indexed models.
    """
    def __init__(self, backend, name):
        self.backend = backend
        self.es = backend.es
        self.mapping_class = backend.mapping_class
        self.name = name
    def put(self):
        # Create the index with the backend's analyzer/tokenizer settings.
        self.es.indices.create(self.name, self.backend.settings)
    def delete(self):
        try:
            self.es.indices.delete(self.name)
        except NotFoundError:
            # Deleting a non-existent index is not an error.
            pass
    def exists(self):
        return self.es.indices.exists(self.name)
    def is_alias(self):
        return self.es.indices.exists_alias(self.name)
    def aliased_indices(self):
        """
        If this index object represents an alias (which appear the same in the
        Elasticsearch API), this method can be used to fetch the list of indices
        the alias points to.
        Use the is_alias method if you need to find out if this an alias. This
        returns an empty list if called on an index.
        """
        return [
            self.backend.index_class(self.backend, index_name)
            for index_name in self.es.indices.get_alias(name=self.name).keys()
        ]
    def put_alias(self, name):
        """
        Creates a new alias to this index. If the alias already exists it will
        be repointed to this index.
        """
        self.es.indices.put_alias(name=name, index=self.name)
    def add_model(self, model):
        """Register the model's field mapping on this index."""
        # Get mapping
        mapping = self.mapping_class(model)
        # Put mapping
        self.es.indices.put_mapping(
            index=self.name, doc_type=mapping.get_document_type(), body=mapping.get_mapping()
        )
    def add_item(self, item):
        """Index a single object (no-op when its class is not indexed)."""
        # Make sure the object can be indexed
        if not class_is_indexed(item.__class__):
            return
        # Get mapping
        mapping = self.mapping_class(item.__class__)
        # Add document to index
        self.es.index(
            self.name, mapping.get_document_type(), mapping.get_document(item), id=mapping.get_document_id(item)
        )
    def add_items(self, model, items):
        """Index many objects of one model with a single bulk request."""
        if not class_is_indexed(model):
            return
        # Get mapping
        mapping = self.mapping_class(model)
        doc_type = mapping.get_document_type()
        # Create list of actions
        actions = []
        for item in items:
            # Create the action
            action = {
                '_index': self.name,
                '_type': doc_type,
                '_id': mapping.get_document_id(item),
            }
            action.update(mapping.get_document(item))
            actions.append(action)
        # Run the actions
        bulk(self.es, actions)
    def delete_item(self, item):
        """Remove an object's document from the index, if present."""
        # Make sure the object can be indexed
        if not class_is_indexed(item.__class__):
            return
        # Get mapping
        mapping = self.mapping_class(item.__class__)
        # Delete document
        try:
            self.es.delete(
                self.name,
                mapping.get_document_type(),
                mapping.get_document_id(item),
            )
        except NotFoundError:
            pass  # Document doesn't exist, ignore this exception
    def refresh(self):
        # Make pending index changes visible to search immediately.
        self.es.indices.refresh(self.name)
    def reset(self):
        # Delete old index
        self.delete()
        # Create new index
        self.put()
class ElasticsearchIndexRebuilder(object):
    """Rebuild strategy that wipes and repopulates the live index in place.

    The index is briefly empty between ``start()`` and the end of reindexing;
    see ElasticsearchAtomicIndexRebuilder for a downtime-free alternative.
    """

    def __init__(self, index):
        self.index = index

    def reset_index(self):
        # Drop and recreate the managed index.
        self.index.reset()

    def start(self):
        # Begin from a fresh, empty index and hand it back for population.
        self.reset_index()
        return self.index

    def finish(self):
        # Make the newly indexed documents visible to search.
        self.index.refresh()
class ElasticsearchAtomicIndexRebuilder(ElasticsearchIndexRebuilder):
    """Rebuild strategy that indexes into a brand-new index and atomically
    repoints an alias at it when done, so search stays available throughout.
    """
    def __init__(self, index):
        # The configured index name becomes an alias; the documents actually
        # live in a concrete index with a random 7-character suffix.
        self.alias = index
        self.index = index.backend.index_class(
            index.backend,
            self.alias.name + '_' + get_random_string(7).lower()
        )
    def reset_index(self):
        # Delete old index using the alias
        # This should delete both the alias and the index
        self.alias.delete()
        # Create new index
        self.index.put()
        # Create a new alias
        self.index.put_alias(self.alias.name)
    def start(self):
        # Create the new index
        self.index.put()
        return self.index
    def finish(self):
        self.index.refresh()
        if self.alias.is_alias():
            # Update existing alias, then delete the old index
            # Find index that alias currently points to, we'll delete it after
            # updating the alias
            old_index = self.alias.aliased_indices()
            # Update alias to point to new index
            self.index.put_alias(self.alias.name)
            # Delete old index
            # aliased_indices() can return multiple indices. Delete them all
            for index in old_index:
                if index.name != self.index.name:
                    index.delete()
        else:
            # self.alias doesn't currently refer to an alias in Elasticsearch.
            # This means that either nothing exists in ES with that name or
            # there is currently an index with the that name
            # Run delete on the alias, just in case it is currently an index.
            # This happens on the first rebuild after switching ATOMIC_REBUILD on
            self.alias.delete()
            # Create the alias
            self.index.put_alias(self.alias.name)
class ElasticsearchSearchBackend(BaseSearchBackend):
    """Wagtail search backend for Elasticsearch 1.x.

    Subclasses for newer ES versions swap out the index/query/mapping classes
    and the analysis settings below.
    """
    index_class = ElasticsearchIndex
    query_class = ElasticsearchSearchQuery
    results_class = ElasticsearchSearchResults
    mapping_class = ElasticsearchMapping
    basic_rebuilder_class = ElasticsearchIndexRebuilder
    atomic_rebuilder_class = ElasticsearchAtomicIndexRebuilder
    # Index-creation settings: the ngram/edgengram analyzers back the
    # partial-match ("autocomplete") search fields.
    settings = {
        'settings': {
            'analysis': {
                'analyzer': {
                    'ngram_analyzer': {
                        'type': 'custom',
                        'tokenizer': 'lowercase',
                        'filter': ['asciifolding', 'ngram']
                    },
                    'edgengram_analyzer': {
                        'type': 'custom',
                        'tokenizer': 'lowercase',
                        'filter': ['asciifolding', 'edgengram']
                    }
                },
                'tokenizer': {
                    'ngram_tokenizer': {
                        'type': 'nGram',
                        'min_gram': 3,
                        'max_gram': 15,
                    },
                    'edgengram_tokenizer': {
                        'type': 'edgeNGram',
                        'min_gram': 2,
                        'max_gram': 15,
                        'side': 'front'
                    }
                },
                'filter': {
                    'ngram': {
                        'type': 'nGram',
                        'min_gram': 3,
                        'max_gram': 15
                    },
                    'edgengram': {
                        'type': 'edgeNGram',
                        'min_gram': 1,
                        'max_gram': 15
                    }
                }
            }
        }
    }
    def __init__(self, params):
        """``params`` is this backend's config dict; recognised keys are
        popped and whatever remains (via OPTIONS) is passed straight to the
        Elasticsearch client constructor.
        """
        super(ElasticsearchSearchBackend, self).__init__(params)
        # Get settings
        self.hosts = params.pop('HOSTS', None)
        self.index_name = params.pop('INDEX', 'wagtail')
        self.timeout = params.pop('TIMEOUT', 10)
        if params.pop('ATOMIC_REBUILD', False):
            self.rebuilder_class = self.atomic_rebuilder_class
        else:
            self.rebuilder_class = self.basic_rebuilder_class
        # If HOSTS is not set, convert URLS setting to HOSTS
        es_urls = params.pop('URLS', ['http://localhost:9200'])
        if self.hosts is None:
            self.hosts = []
            for url in es_urls:
                parsed_url = urlparse(url)
                use_ssl = parsed_url.scheme == 'https'
                # Default port follows the scheme when the URL omits one.
                port = parsed_url.port or (443 if use_ssl else 80)
                http_auth = None
                if parsed_url.username is not None and parsed_url.password is not None:
                    http_auth = (parsed_url.username, parsed_url.password)
                self.hosts.append({
                    'host': parsed_url.hostname,
                    'port': port,
                    'url_prefix': parsed_url.path,
                    'use_ssl': use_ssl,
                    'verify_certs': use_ssl,
                    'http_auth': http_auth,
                })
        self.settings = copy.deepcopy(self.settings)  # Make the class settings attribute as instance settings attribute
        self.settings = deep_update(self.settings, params.pop("INDEX_SETTINGS", {}))
        # Get Elasticsearch interface
        # Any remaining params are passed into the Elasticsearch constructor
        options = params.pop('OPTIONS', {})
        self.es = Elasticsearch(
            hosts=self.hosts,
            timeout=self.timeout,
            **options)
    def get_index_for_model(self, model):
        # Every model shares the single configured index in this backend.
        return self.index_class(self, self.index_name)
    def get_index(self):
        return self.index_class(self, self.index_name)
    def get_rebuilder(self):
        return self.rebuilder_class(self.get_index())
    def reset_index(self):
        # Use the rebuilder to reset the index
        self.get_rebuilder().reset_index()
    def add_type(self, model):
        # Register the model's mapping on its index.
        self.get_index_for_model(model).add_model(model)
    def refresh_index(self):
        self.get_index().refresh()
    def add(self, obj):
        self.get_index_for_model(type(obj)).add_item(obj)
    def add_bulk(self, model, obj_list):
        self.get_index_for_model(model).add_items(model, obj_list)
    def delete(self, obj):
        self.get_index_for_model(type(obj)).delete_item(obj)
SearchBackend = ElasticsearchSearchBackend
|
|
# -*- coding: utf-8 -*-
import collections
import datetime
import os.path
import string
import matplotlib.dates
import matplotlib.pyplot as plt
import numpy
import operator
import pandas
from midas.compat import imap
from midas.compat import str_type
from midas.see5 import calculate_recall_precision
from midas.see5 import calculate_tpr
from midas.see5 import calculate_fpr
from midas.tools import iter_files_content
from midas.pig_schema import FLATTENED_PARSER
from midas.pig_schema import SITES_W_COMPANY_PARSER
def iter_sites_w_company(directory_or_file):
    """Yield one ``(site, rank_series, company, code, funding_tstamp)`` tuple
    per record parsed with ``SITES_W_COMPANY_PARSER`` from the given file or
    directory. ``rank_series`` is a pandas Series of ranks indexed by their
    timestamps; ``funding_tstamp`` is a pandas Timestamp.
    """
    for swc in imap(SITES_W_COMPANY_PARSER,
                    iter_files_content(directory_or_file)):
        rank_index = pandas.DatetimeIndex(
            [entry.tstamp for entry in swc.ranking])
        rank_series = pandas.Series(
            [entry.rank for entry in swc.ranking], index=rank_index)
        yield (swc.site, rank_series, swc.company, swc.code,
               pandas.Timestamp(swc.tstamp))
##################################
## Funding Rounds per date Plot ##
##################################
def make_fr_per_date_plot(companies, plot_file=None):
    """Plot a per-month histogram of funding rounds since March 2011,
    grouped by funding-round code.

    ``companies`` is a file or directory whose contents are parsed with
    ``FLATTENED_PARSER``. When ``plot_file`` is given the figure is also
    saved there. Returns the matplotlib figure.

    NOTE(review): relies on Python 2 semantics -- ``map``/``dict.values``
    must yield lists for ``ax.hist``; confirm before porting to Python 3.
    """
    contents = iter_files_content(companies)
    d = collections.defaultdict(list)
    min_date = datetime.date(2011, 3, 1)
    months = set()
    for c in imap(FLATTENED_PARSER, contents):
        if c.tstamp >= min_date:
            # Store matplotlib date numbers so they can be histogrammed.
            d[c.code].append(matplotlib.dates.date2num(c.tstamp))
            months.add(datetime.date(c.tstamp.year, c.tstamp.month, 1))
    months = sorted(months)
    # Append the first day of the following month so the final month gets a
    # full-width bin.
    right_border = months[-1] + datetime.timedelta(31)
    right_border = datetime.date(right_border.year, right_border.month, 1)
    months.append(right_border)
    fig = plt.figure(figsize=(4*1.4, 3*1.4))
    ax = fig.add_subplot(111)
    ax.hist(d.values(), label=map(str.title, d.keys()),
            bins=matplotlib.dates.date2num(months))
    ax.set_xlim(matplotlib.dates.date2num(months[0]),
                matplotlib.dates.date2num(months[-1]))
    ax.legend()
    ax.xaxis.set_major_locator(
        matplotlib.dates.MonthLocator(bymonthday=15, interval=2)
    )
    ax.xaxis.set_major_formatter(
        # NOTE(review): matplotlib.ticker is only reachable here because
        # importing pyplot pulls it in; consider importing it explicitly.
        matplotlib.ticker.FuncFormatter(
            lambda d, _: matplotlib.dates.num2date(d).strftime('%B %Y')
        )
    )
    fig.autofmt_xdate()
    ax.set_ylabel('Number of Funding Rounds')
    ax.grid(True, axis='y')
    if plot_file:
        fig.savefig(plot_file)
    return fig
#########################################
## Available Days Before Funding Round ##
#########################################
def get_available_days_before_fr(ts, fr):
    """Return ``(code, days)`` where ``days`` counts from the funding-round
    date to the first non-NaN observation of the site's rank series.

    ``ts`` maps site names to rank series (e.g. a DataFrame column per site);
    ``fr`` is a ``(site, date, code)`` triple.
    """
    site, fr_date, code = fr
    observations = ts[site].dropna()
    first_seen = observations.index[0]
    return code, (first_seen - pandas.Timestamp(fr_date)).days
def make_available_days_before_funding_rounds_plot_data(sites_w_company):
    """Group, by funding code, how many days of ranking data were available
    before each funding round.

    Outliers are clamped into catch-all bins so the histogram stays
    readable: anything over a year maps to 400, anything negative to -40.
    Returns a defaultdict mapping code -> list of day counts.
    """
    by_code = collections.defaultdict(list)
    for _site, rank_ts, _company, code, funding_tstamp in sites_w_company:
        days = (rank_ts.dropna().index[0] - funding_tstamp).days
        if days > 365:
            days = 400
        elif days < 0:
            days = -40
        by_code[code].append(days)
    return by_code
def make_available_days_before_funding_rounds_plot(sites_w_company,
                                                   plot_file=None):
    """Histogram (log-scaled y axis) of how many days of ranking data were
    available before each funding round, grouped by funding code.

    Saves to ``plot_file`` when given; returns the matplotlib figure.
    NOTE(review): ``string.capitalize`` and list-returning ``map`` are
    Python 2 only.
    """
    data = make_available_days_before_funding_rounds_plot_data(
        sites_w_company
    )
    fig = plt.figure()
    ax = fig.add_subplot('111')
    res = ax.hist(data.values(),
                  bins=10,
                  histtype='bar',
                  label=map(string.capitalize, data.keys()),
                  log=True)
    ax.legend(loc='best')
    ax.set_ylabel('Number of Funding Rounds')
    ax.set_xlabel('Number of Days')
    ax.grid(which='both')
    if plot_file:
        fig.savefig(plot_file)
    return fig
#########################################
## Median of Rank before Funding Round ##
#########################################
def make_final_rank_before_funding_plot(sites_w_company):
    """Stack three rank-before-funding histograms (5, 95 and 215 days before
    the round, with a 10-day window each) into one figure and return it.

    Unlike the single-plot variant, this never saves to disk.
    NOTE(review): list-returning ``map`` and ``string.capitalize`` are
    Python 2 only.
    """
    before_days = [5, 95, 215]
    offset_days = 10
    data = [ make_rank_before_funding_plot_data(sites_w_company,
                                                days,
                                                offset_days)
             for days in before_days ]
    fig = plt.figure()
    axes = [ fig.add_subplot(len(before_days), 1, i)
             for i in range(1, len(before_days) + 1) ]
    # Fixed rank bins: 0 .. 1,000,000 in 100k steps.
    bins = range(0, 1000001, 100000)
    for ax, d, days in zip(axes, data, before_days):
        ax.hist(d.values(), bins=bins, label=map(string.capitalize, d.keys()))
        ax.legend(loc='best')
        ax.grid(True)
        title = make_rank_before_funding_plot_title(days, offset_days)
        ax.set_title(title, fontsize='medium')
    # Shared axis labels on middle/bottom subplots (assumes three rows).
    axes[1].set_ylabel('Number of Funding Rounds', fontsize='x-large')
    axes[2].set_xlabel('Rank', fontsize='x-large')
    return fig
def make_rank_before_funding_plot(sites_w_company,
                                  before_days,
                                  offset_days=10,
                                  plot_file=None):
    """Histogram of median ranks observed in a window starting ``before_days``
    days before each funding round and lasting ``offset_days`` days, grouped
    by funding code.

    Saves to ``plot_file`` when given; returns the matplotlib figure.
    NOTE(review): ``string.capitalize``/list-``map`` are Python 2 only.
    """
    collected = make_rank_before_funding_plot_data(sites_w_company,
                                                   before_days,
                                                   offset_days)
    fig = plt.figure()
    ax = fig.add_subplot('111')
    res = ax.hist(collected.values(),
                  bins=9,
                  label=map(string.capitalize, collected.keys()))
    ax.legend(loc='best')
    ax.grid(True)
    ax.set_xlabel('Rank')
    ax.set_ylabel('Number of Funding Rounds')
    ax.set_title(make_rank_before_funding_plot_title(before_days,
                                                     offset_days))
    if plot_file:
        fig.savefig(plot_file)
    return fig
def make_rank_before_funding_plot_fname(directory,
                                        before_days,
                                        days_offset=10,
                                        prefix='rank_before_funding_plot'):
    """Build the target path for a rank-before-funding histogram image,
    encoding the window parameters into the file name.
    """
    basename = '{0}_-_before_days_{1}_-_days_offset_{2}.png'.format(
        prefix, before_days, days_offset)
    return os.path.join(directory, basename)
def make_rank_before_funding_plot_data(sites_w_company, before_days,
                                       days_offset=10):
    """Collect, per funding code, the median rank observed in a window that
    starts ``before_days`` days before each funding round and spans
    ``days_offset`` days. Rounds without usable data are skipped.
    """
    window = pandas.DateOffset(days=days_offset)
    lead = pandas.DateOffset(days=before_days)
    by_code = collections.defaultdict(list)
    for _site, rank_ts, _company, code, funding_tstamp in sites_w_company:
        try:
            median = median_rank_of_ts_in_period(rank_ts,
                                                 funding_tstamp - lead,
                                                 window)
        except KeyError:
            # No data at all for the requested period.
            continue
        if not numpy.isnan(median):
            by_code[code].append(median)
    return by_code
def median_rank_of_ts_in_period(ts, start_date, offset):
    """Median of the non-NaN values of ``ts`` between ``start_date`` and
    ``start_date + offset`` (pandas label slicing includes both endpoints).
    """
    window = ts[start_date:(start_date + offset)]
    return window.dropna().median()
def make_rank_before_funding_plot_title(before_days, offset_days):
    """Build the title describing a median-rank window relative to a funding
    round ("Fund Raise").

    :param before_days: days before the round at which the window starts
    :param offset_days: length of the window in days (must be positive)
    :raises ValueError: for a negative ``offset_days`` or for parameter
        combinations no branch covers (previously these fell through and
        raised ``NameError`` on the unbound ``first`` variable).
    """
    end_days = before_days - offset_days
    if 0 > offset_days:
        raise ValueError('offset_days must greater than zero')
    elif 0 < end_days < before_days:
        # Window lies entirely before the round.
        first = before_days
        snd = '{} days before'.format(end_days)
    elif end_days < 0 < before_days:
        # Window straddles the round date.
        first = '{0} days before'.format(before_days)
        snd = '{0} days after'.format(-1*end_days)
    elif end_days < 0 == before_days:
        # Window starts exactly at the round date.
        first = 'Fund Raise'
        snd = '{} days after'.format(end_days * -1)
    elif end_days == 0 < before_days:
        # Window ends exactly at the round date.
        first = '{} days before'.format(before_days)
        snd = ''
    else:
        # BUGFIX: e.g. offset_days == 0 previously reached the final format
        # call with `first`/`snd` unbound, raising NameError.
        raise ValueError(
            'unsupported combination: before_days={0}, offset_days={1}'
            .format(before_days, offset_days))
    title = 'Median of Rank from {} to {} Fund Raise'.format(first, snd)
    return title
###########################
## Recall Precision Plot ##
###########################
def make_recall_precision_plot(results, plot_file=None):
    """
    ``results`` should be the result of `midas.see5.main`

    Scatter-plots precision over recall: one marker per cost setting, one
    series per argument combination. Points whose precision is NaN are
    skipped. Saves to ``plot_file`` when given; returns the figure.
    """
    fig = plt.figure()
    ax = fig.add_subplot('111')
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    for args, per_cost_result in results.items():
        xs = []
        ys = []
        for cm in per_cost_result.values():
            x, y = calculate_recall_precision(cm)
            if numpy.isnan(y):
                continue
            xs.append(x)
            ys.append(y)
        # Tuple keys are joined so the legend shows a readable label.
        if not isinstance(args, str_type):
            args = ' '.join(args)
        ax.plot(xs, ys, 'o', label=args)
    ax.legend(loc='best')
    ax.grid(True)
    if plot_file:
        fig.savefig(plot_file)
    return fig
def make_tpr_fpr_plot(results, plot_file=None):
    """
    ``results`` should be the result of `midas.see5.main`

    Plots a ROC-style chart: true-positive rate over false-positive rate,
    one marker per cost setting and one series per argument combination.
    Saves to ``plot_file`` when given; returns the figure.
    """
    fig = plt.figure()
    ax = fig.add_subplot('111')
    ax.set_ylabel('True Positive Rate')
    ax.set_xlabel('False Positive Rate')
    for args, per_cost_result in results.items():
        xs = []
        ys = []
        for confusion_matrix in per_cost_result.values():
            xs.append(calculate_fpr(confusion_matrix))
            ys.append(calculate_tpr(confusion_matrix))
        # Tuple keys are joined so the legend shows a readable label.
        if not isinstance(args, str_type):
            args = ' '.join(args)
        ax.plot(xs, ys, 'o', label=args)
    ax.legend(loc='best')
    ax.grid(True)
    # Dotted diagonal: the random-classifier reference line.
    ax.plot([0.0, 0.5, 1.0], [0.0, 0.5, 1.0], ':k')
    if plot_file:
        fig.savefig(plot_file)
    return fig
|
|
##
# \namespace cross3d.softimage.softimagesceneobject
#
# \remarks The SoftimageSceneObject class provides the implementation of the AbstractSceneObject class as it applies
# to Softimage
#
# \author douglas
# \author Blur Studio
# \date 04/04/11
#
#------------------------------------------------------------------------------------------------------------------------
from cross3d import application
from cross3d.constants import ObjectType
from PySoftimage import xsi, xsiFactory, constants as xsiConstants
from win32com.client.dynamic import Dispatch as dynDispatch
from cross3d.abstract.abstractsceneobject import AbstractSceneObject
#------------------------------------------------------------------------------------------------------------------------
class SoftimageSceneObject(AbstractSceneObject):
	# Maps Softimage native type strings to cross3d ObjectType flags.
	# NOTE(review): 'Thinking', 'PF_Source' and 'FumeFX' look like 3ds Max
	# type names carried over from another implementation -- confirm they
	# ever occur as Softimage native types.
	_nativeToAbstractObjectType = { 'light' : ObjectType.Light,
	                                'camera' : ObjectType.Camera,
	                                'Thinking' : ObjectType.Particle | ObjectType.Thinking,
	                                'PF_Source' : ObjectType.Particle,
	                                'FumeFX' : ObjectType.FumeFX,
	                                'polymsh' : ObjectType.Geometry,
	                                'surfmsh' : ObjectType.NurbsSurface,
	                                'crvlist' : ObjectType.Curve,
	                                '#model' : ObjectType.Model,
	                                '#Group' : ObjectType.Group,
	                                'CameraInterest': ObjectType.CameraInterest }
	# Inverse lookup table (built with iteritems -- Python 2 only).
	_abstractToNativeObjectType = dict((v,k) for k, v in _nativeToAbstractObjectType.iteritems())
#------------------------------------------------------------------------------------------------------------------------
# protected methods
#------------------------------------------------------------------------------------------------------------------------
	def _findNativeChild(self, name, recursive=False, parent=None):
		"""
			\remarks	implements the AbstractSceneObject._findNativeChildren method to look up a specific native children for this object
			\return		<PySotimage.xsi.Object> nativeObject
		"""
		# NOTE(review): `parent` is accepted but never used; the search always
		# starts at this object's native pointer.
		return self.nativePointer().FindChild(name, '', '', recursive)
def _nativeChildren(self, recursive=False, wildcard='', type='', parent='', childrenCollector=[]):
"""
\remarks implements the AbstractSceneObject._nativeChildren method to look up the native children for this object
\param recursive <bool> wildcard <string> type <string parent <string> childrenCollector <list>
\sa children
\return <list> [ <PySoftimage.xsi.Object> nativeObject, .. ]
"""
nativeType = ''
if type != '':
nativeType = self._nativeTypeOfObjectType(type)
#return [ obj for obj in self._nativePointer.FindChildren( name, nativeType, parent, recursive ) ]
return self._nativePointer.FindChildren2(wildcard, nativeType, '', recursive)
	def _nativeParent(self):
		"""
			\remarks	implements the AbstractSceneObject._nativeParent method to look up the native parent for this object
			\sa			parent, setParent, _setNativeParent
			\return		<PySoftimage.xsi.Object> nativeObject || None
		"""
		# Delegates straight to the native pointer's Parent attribute.
		return self._nativePointer.Parent
	def _setNativeParent(self, nativeParent):
		"""
			\remarks	implements the AbstractSceneObject._setNativeParent method to set the native parent for this object
			\sa			parent, setParent, _nativeParent
			\param		<PySoftimage.xsi.Object> nativeObject || None
			\return		<bool> success
		"""
		if nativeParent is None:
			# Un-parenting means re-parenting under the scene root.
			nativeParent = xsi.ActiveSceneRoot
		# Making sure the object is not already a child of the parent, otherwise Softimage throws an error
		if not self._nativePointer.Parent3DObject.IsEqualTo(nativeParent):
			nativeParent.AddChild(self._nativePointer)
		return True
def _nativeModel(self):
"""
\remarks implements the AbstractSceneObject._nativeModel method to look up the native model for this object
\sa children
\return <list> [ <PySotimage.xsi.Model> nativeObject, .. ]
"""
model = None
obj = self.nativePointer()
ignoreSceneRoot = True
if str(obj.Type) == "#model":
if ignoreSceneRoot is True and obj.Name == "Scene_Root":
model = None
else:
model = obj
else:
try:
if ignoreSceneRoot is True and obj.Model.Name == "Scene_Root":
model = None
else:
model = obj.Model
except AttributeError:
pass
return model
def _setNativeModel(self, nativeModel):
"""
"""
if self._nativePointer.Model.IsEqualTo(nativeModel):
return True
# Making sure the object is not already a child of the parent, otherwise Softimage throws an error
if not self._nativePointer.Parent3DObject.IsEqualTo(nativeModel):
nativeModel.AddChild(self._nativePointer)
return True
#------------------------------------------------------------------------------------------------------------------------
# public methods
#------------------------------------------------------------------------------------------------------------------------
	def isDeleted(self):
		# A native pointer whose Parent is gone no longer lives in the scene.
		return (self._nativePointer.Parent is None)
def _constrainingNativeObjects(self):
constraining = []
for constraint in self.Kinematics.Constraints:
constraining += [obj for obj in constraint.Constraining]
return constraining
def _constrainedNativeObjects(self):
constraineds = []
# TODO: Currently we only support "Pose Constraints".
constraints = xsi.FindObjects(None, "{D42BBF71-3C47-11D2-8B42-00A024EE586F}")
for constraint in constraints:
# This is a weird thing when looking for all object type constraint.
if constraint.FullName.startswith("TransientObjectContainer") or constraint.Parent3DObject is None:
continue
# Looping through constraining object.
for constraining in constraint.Constraining:
if constraining.isEqualTo(self._nativePointer):
constraineds.append(constraint.Constrained)
return constraineds
def getCacheName(self, type):
typeDic = { "Pc":".pc2",
"Tmc":".tmc",
"Abc":".abc",
"Icecache":".icecache"}
obj = self._nativePointer
name = obj.Fullname
cacheName = name.replace("." , "")
cacheName = cacheName + typeDic[type]
return cacheName
	def deleteProperty(self, propertyName):
		"""
			\remarks	implements the AbstractSceneObject.deleteProperty that deletes a property of this object.
			\param		propertyName	<str>
			\return		<bool> success
		"""
		# Properties are addressed as "<objectName>.<propertyName>" in XSI.
		xsi.DeleteObj('.'.join([ self.name(), propertyName ]))
		return True
	def uniqueId(self):
		"""
			\remarks	implements the AbstractSceneObject.uniqueId to look up the unique identifier for this object and returns it
			\sa			displayName, setDisplayName, setName
			\return		<int> the native ObjectID
		"""
		return self._nativePointer.ObjectID
	def applyCache(self, path, type):
		"""Applies cache to object
		param <string>path , <string>types "Pc","Tmc","Icecache", "Abc", Type
		return cache object

		NOTE(review): the Alembic branch matches lowercase "abc" while the
		docstring and getCacheName use "Abc" -- both paths are unsupported
		today, but confirm the intended casing before implementing it.
		"""
		obj = self._nativePointer
		if type == "Pc":
			# Reuse an existing BlurPC deformer when present, else add one.
			cache = obj.ActivePrimitive.ConstructionHistory.Find("BlurPCOperator")
			if not cache:
				xsi.BlurPCAddDeformer(self._nativePointer)
				cache = obj.ActivePrimitive.ConstructionHistory.Find("BlurPCOperator")
				cache.Parameters("Filename").Value = path
				#xsi.setValue((cache.Fullname +".Filename"), path)
				return cache
			else:
				cache.Parameters("Filename").Value = path
				return cache
		elif type == "Tmc":
			# TMC caches hang off the global kinematics, not the geometry.
			kine = obj.Kinematics.Global
			tmcop = kine.NestedObjects("TmcOp")
			if not tmcop:
				cache = xsi.ApplyTmcOp(self._nativePointer)
				tmcop = kine.NestedObjects("TmcOp")
			# NOTE(review): NestedObjects appears to return a collection --
			# confirm Parameters() is valid on it rather than its first item.
			tmcop.Parameters("Filename").Value = path
			return tmcop
		elif type == "Icecache":
			cache = xsi.AddFileCacheSource(obj, path)
			return cache
		elif type == "abc":
			print("unsupported")
			return None
		else:
			print("unsupported cache Type")
			return None
def parameters(self):
parameters = {}
for parameter in self._nativePointer.Parameters:
parameters[parameter.ScriptName] = parameter.Value
return parameters
def setParameters(self, parameters):
for key, value in parameters.iteritems():
try:
self._nativePointer.Parameters(key).Value = value
except:
print 'TRACEBACK: skipping param: {} {}...'.format(key, value)
print traceback.format_exc()
def resetTransforms(self, pos=True, rot=True, scl=True):
"""
Resets the transforms to zero.
"""
if pos and rot and scl:
xsi.ResetTransform(self._nativePointer, "siObj", "siSRT", "siXYZ")
else:
if pos:
xsi.ResetTransform(self._nativePointer, "siObj", "siTrn", "siXYZ")
if rot:
xsi.ResetTransform(self._nativePointer, "siObj", "siRot", "siXYZ")
if pos:
xsi.ResetTransform(self._nativePointer, "siObj", "siScl", "siXYZ")
return True
def rotation(self, local=False):
"""
Returns the rotation of the current object.
:param local: If True return the local rotation. Default False.
"""
if local:
trans = self._nativePointer.Kinematics.Local
else:
trans = self._nativePointer.Kinematics.Global
return trans.rotx.Value, trans.roty.Value, trans.rotz.Value
def setHidden(self, state):
"""Hides/unhides this object
"""
self._nativePointer.Properties('Visibility').Parameters('viewvis').SetValue(not state)
self._nativePointer.Properties('Visibility').Parameters('rendvis').SetValue(not state)
return True
def matchTransforms(self, obj, position=True, rotation=True, scale=True):
"""
Currently the auto-key support is a bit lite, but it should cover most of the cases.
"""
if position:
self._nativePointer.Kinematics.Global.Parameters('posx').Value = obj.nativePointer().Kinematics.Global.Parameters('posx').Value
self._nativePointer.Kinematics.Global.Parameters('posy').Value = obj.nativePointer().Kinematics.Global.Parameters('posy').Value
self._nativePointer.Kinematics.Global.Parameters('posz').Value = obj.nativePointer().Kinematics.Global.Parameters('posz').Value
if rotation:
self._nativePointer.Kinematics.Global.Parameters('rotx').Value = obj.nativePointer().Kinematics.Global.Parameters('rotx').Value
self._nativePointer.Kinematics.Global.Parameters('roty').Value = obj.nativePointer().Kinematics.Global.Parameters('roty').Value
self._nativePointer.Kinematics.Global.Parameters('rotz').Value = obj.nativePointer().Kinematics.Global.Parameters('rotz').Value
if scale:
self._nativePointer.Kinematics.Global.Parameters('sclx').Value = obj.nativePointer().Kinematics.Global.Parameters('sclx').Value
self._nativePointer.Kinematics.Global.Parameters('scly').Value = obj.nativePointer().Kinematics.Global.Parameters('scly').Value
self._nativePointer.Kinematics.Global.Parameters('sclz').Value = obj.nativePointer().Kinematics.Global.Parameters('sclz').Value
if application.autokey():
self.key()
return True
	def key(self, target='keyable'):
		"""
		Set keys on the object parameters.

		:param target: selection of parameters to key. Only the default
			'keyable' behaviour is implemented; the argument is currently
			unused and keys are always saved on the keyable parameters.
		"""
		xsi.SaveKeyOnKeyable(self._nativePointer)
def translation(self, local=False):
"""
Returns the translation of the current object.
:param local: If True return the local translation. Default False.
"""
if local:
trans = self._nativePointer.Kinematics.Local
else:
trans = self._nativePointer.Kinematics.Global
return trans.posx.Value, trans.posy.Value, trans.posz.Value
#------------------------------------------------------------------------------------------------------------------------
# class methods
#------------------------------------------------------------------------------------------------------------------------
@classmethod
def _typeOfNativeObject(cls, nativeObject):
"""
\remarks reimplements the AbstractSceneObject._typeOfNativeObject method to returns the ObjectType of the nativeObject applied
\param <PySoftimage.xsi.Object> nativeObject || None
\return <bool> success
"""
type = nativeObject.Type
abstractType = cls._nativeToAbstractObjectType.get(type)
if abstractType == None:
return AbstractSceneObject._typeOfNativeObject(nativeObject)
return abstractType
#------------------------------------------------------------------------------------------------------------------------
# static methods
#------------------------------------------------------------------------------------------------------------------------
@staticmethod
def _nativeTypeOfObjectType(objectType):
"""
\remarks reimplements the AbstractSceneObject._nativeTypeOfObjectType method to return the nativeType of the ObjectType supplied
\param <cross3d.constants.ObjectType> objectType || None
\return <bool> success
"""
if objectType == ObjectType.Geometry:
return 'polymsh'
elif objectType == ObjectType.Light:
return 'light'
elif objectType == ObjectType.Camera:
return 'camera'
elif objectType == ObjectType.Model:
return '#model'
elif objectType == ObjectType.Group:
return '#group'
elif objectType == ObjectType.NurbsSurface:
return 'surfmsh'
elif objectType == ObjectType.Curve:
return 'crvlist'
else:
return None
return AbstractSceneObject._nativeTypeOfObjectType(objectType)
	def keyedFrames(self, start=None, end=None):
		"""
		Returns the sorted list of frame times at which this object's transform
		parameters (pos/rot/scl on x/y/z) carry FCurve keys.

		:param start: if given, drop frames earlier than this time (1E-6 epsilon).
		:param end: if given, drop frames later than this time (1E-6 epsilon).
		"""
		# Collecting the transform parameters with animation.
		parameters = []
		transformsGlobal = self._nativePointer.Kinematics.Global
		transformsLocal = self._nativePointer.Kinematics.Local
		for transform in [ 'pos', 'rot', 'scl' ]:
			for axis in 'xyz':
				parameterGlobal = transformsGlobal.Parameters(transform + axis)
				parameterLocal = transformsLocal.Parameters(transform + axis)
				# NOTE(review): the *local* parameter is collected whenever either
				# the global or the local one is animated. Also the local test uses
				# 'isAnimated' (lower-case i) vs 'IsAnimated' for the global one;
				# COM dispatch is case-insensitive so both resolve, but the spelling
				# should probably be unified.
				if (parameterGlobal and parameterGlobal.IsAnimated(xsiConstants.siFCurveSource)) or (parameterLocal and parameterLocal.isAnimated(xsiConstants.siFCurveSource)):
					parameters.append(parameterLocal)
		# Collecting all curves for this parameters.
		curves = []
		for parameter in parameters:
			for source in parameter.Sources:
				# Source type 20 is assumed to identify an FCurve source —
				# TODO confirm against the Softimage SDK constants.
				if source.Type == 20:
					curves.append(source)
		# Collecting all frames with keys for these curves.
		frames = set()
		for curve in curves:
			for key in curve.Keys:
				frames.add(key.Time)
		frames = list(frames)
		# Keep frames inside [start, end], padded by 1E-6 to absorb float jitter.
		frames = filter(lambda a: (start is None or start-1E-6 <= a) and (end is None or a <= end+1E-6), frames)
		return sorted(frames)
# register the symbol
import cross3d
# Expose this class as the concrete SceneObject implementation for the cross3d API.
cross3d.registerSymbol('SceneObject', SoftimageSceneObject)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Plivo Team. See LICENSE.txt for details.
import os
import gevent
import configparser
import ujson as json
from flask import Flask, request, jsonify
from redis.exceptions import LockError
import traceback
from sharq import SharQ
class SharQServer(object):
    """Defines a HTTP based API on top of SharQ and
    exposes the app to run the server.
    """

    def __init__(self, config_path):
        """Load the SharQ config and define the routes.

        :param config_path: path of the ini-style configuration file shared
            with the SharQ core.
        """
        # read the configs required by sharq-server.
        # BUG FIX: SafeConfigParser is deprecated and was removed in Python
        # 3.12; ConfigParser is the supported replacement.
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        # pass the config file to configure the SharQ core.
        self.sq = SharQ(config_path)
        self.app = Flask(__name__)
        # set the routes
        self.app.add_url_rule(
            '/', view_func=self._view_index, methods=['GET'])
        self.app.add_url_rule(
            '/enqueue/<queue_type>/<queue_id>/',
            view_func=self._view_enqueue, methods=['POST'])
        self.app.add_url_rule(
            '/dequeue/', defaults={'queue_type': 'default'},
            view_func=self._view_dequeue, methods=['GET'])
        self.app.add_url_rule(
            '/dequeue/<queue_type>/',
            view_func=self._view_dequeue, methods=['GET'])
        self.app.add_url_rule(
            '/finish/<queue_type>/<queue_id>/<job_id>/',
            view_func=self._view_finish, methods=['POST'])
        self.app.add_url_rule(
            '/interval/<queue_type>/<queue_id>/',
            view_func=self._view_interval, methods=['POST'])
        self.app.add_url_rule(
            '/metrics/', defaults={'queue_type': None, 'queue_id': None},
            view_func=self._view_metrics, methods=['GET'])
        self.app.add_url_rule(
            '/metrics/<queue_type>/', defaults={'queue_id': None},
            view_func=self._view_metrics, methods=['GET'])
        self.app.add_url_rule(
            '/metrics/<queue_type>/<queue_id>/',
            view_func=self._view_metrics, methods=['GET'])
        self.app.add_url_rule(
            '/deletequeue/<queue_type>/<queue_id>/',
            view_func=self._view_clear_queue, methods=['DELETE'])
        self.app.add_url_rule(
            '/deepstatus/',
            view_func=self._view_deep_status, methods=['GET'])

    def requeue(self):
        """Loop endlessly and requeue expired jobs."""
        job_requeue_interval = float(
            self.config.get('sharq', 'job_requeue_interval'))
        while True:
            try:
                self.sq.requeue()
            except Exception:
                traceback.print_exc()
            gevent.sleep(job_requeue_interval / 1000.00)  # in seconds

    def requeue_with_lock(self):
        """Loop endlessly and requeue expired jobs, but with a distributed lock."""
        enable_requeue_script = self.config.get('sharq', 'enable_requeue_script')
        if enable_requeue_script == "false":
            print("requeue script disabled")
            return
        job_requeue_interval = float(
            self.config.get('sharq', 'job_requeue_interval'))
        print("start requeue loop: job_requeue_interval = %f" % (job_requeue_interval))
        while True:
            try:
                # Only one process across the cluster requeues at a time.
                with self.sq.redis_client().lock('sharq-requeue-lock-key', timeout=15):
                    try:
                        self.sq.requeue()
                    except Exception:
                        traceback.print_exc()
            except LockError:
                # the lock wasn't acquired within specified time
                pass
            finally:
                gevent.sleep(job_requeue_interval / 1000.00)  # in seconds

    def _view_index(self):
        """Greetings at the index."""
        return jsonify(**{'message': 'Hello, SharQ!'})

    def _view_enqueue(self, queue_type, queue_id):
        """Enqueues a job into SharQ."""
        response = {
            'status': 'failure'
        }
        try:
            request_data = json.loads(request.data)
        except Exception as e:
            # BUG FIX: Exception has no '.message' attribute on Python 3 (so
            # every error path here raised AttributeError); str(e) is the
            # portable way to get the message. Applied throughout this class.
            response['message'] = str(e)
            return jsonify(**response), 400
        request_data.update({
            'queue_type': queue_type,
            'queue_id': queue_id
        })
        try:
            response = self.sq.enqueue(**request_data)
        except Exception as e:
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response), 201

    def _view_dequeue(self, queue_type):
        """Dequeues a job from SharQ."""
        response = {
            'status': 'failure'
        }
        request_data = {
            'queue_type': queue_type
        }
        try:
            response = self.sq.dequeue(**request_data)
            if response['status'] == 'failure':
                return jsonify(**response), 404
        except Exception as e:
            # traceback is imported at module level; the redundant local
            # import and the manual line-by-line printing were dropped in
            # favour of the print_exc() used by the other handlers.
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response)

    def _view_finish(self, queue_type, queue_id, job_id):
        """Marks a job as finished in SharQ."""
        response = {
            'status': 'failure'
        }
        request_data = {
            'queue_type': queue_type,
            'queue_id': queue_id,
            'job_id': job_id
        }
        try:
            response = self.sq.finish(**request_data)
            if response['status'] == 'failure':
                return jsonify(**response), 404
        except Exception as e:
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response)

    def _view_interval(self, queue_type, queue_id):
        """Updates the queue interval in SharQ."""
        response = {
            'status': 'failure'
        }
        try:
            request_data = json.loads(request.data)
            interval = request_data['interval']
        except Exception as e:
            response['message'] = str(e)
            return jsonify(**response), 400
        request_data = {
            'queue_type': queue_type,
            'queue_id': queue_id,
            'interval': interval
        }
        try:
            response = self.sq.interval(**request_data)
            if response['status'] == 'failure':
                return jsonify(**response), 404
        except Exception as e:
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response)

    def _view_metrics(self, queue_type, queue_id):
        """Gets SharQ metrics based on the params."""
        response = {
            'status': 'failure'
        }
        request_data = {}
        if queue_type:
            request_data['queue_type'] = queue_type
        if queue_id:
            request_data['queue_id'] = queue_id
        try:
            response = self.sq.metrics(**request_data)
        except Exception as e:
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response)

    def _view_deep_status(self):
        """Checks underlying data store health."""
        try:
            self.sq.deep_status()
            response = {
                'status': "success"
            }
            return jsonify(**response)
        except Exception as e:
            print(e)
            traceback.print_exc()
            # Re-raise the original exception (preserving its type and
            # traceback) instead of a bare, information-free Exception.
            raise

    def _view_clear_queue(self, queue_type, queue_id):
        """Remove queue from SharQ based on the queue_type and queue_id."""
        response = {
            'status': 'failure'
        }
        try:
            request_data = json.loads(request.data)
        except Exception as e:
            response['message'] = str(e)
            return jsonify(**response), 400
        request_data.update({
            'queue_type': queue_type,
            'queue_id': queue_id
        })
        try:
            response = self.sq.clear_queue(**request_data)
        except Exception as e:
            traceback.print_exc()
            response['message'] = str(e)
            return jsonify(**response), 400
        return jsonify(**response)
def setup_server(config_path):
    """Configure the SharQ server, start the background requeue loop
    and return the server instance."""
    sharq_server = SharQServer(config_path)
    # Run the lock-protected requeue loop in a background greenlet.
    gevent.spawn(sharq_server.requeue_with_lock)
    return sharq_server
|
|
#!/usr/local/sci/bin/python2.7
#*****************************
#
# apply 5x5 climatology to 5x5 monthly fields
#
#
#************************************************************************
'''
Author: Robert Dunn
Created: March 2016
Last update: 11 Feb 2021
Location: /home/h04/hadkw/HadISDH_Code/MARINE_BUILD/EUSTACE_SST_MARINE/
-----------------------
CODE PURPOSE AND OUTPUT
-----------------------
Takes 5x5 monthly fields absolutes and anomalies and applies (subtracts) the 5x5 monthly climatology and
climatological average of the anomalies (1981-2010). This has the effect of producing derived anomalies relative to
the gridded climatology for comparison with the gridded anomalies (calculated at the individual observation
level) AND renormalising the gridded anomalies to have a zero mean over the 1981-2010 period. This is only really
essential for the first iteration where the ERA-Interim climatology has caused a bias in the anomalies. Ideally
we want to present the gridded anomalies because these mitigate errors from first gridding absolute values.
Can work with 5x5 monthly fields created from daily or monthly data and using relaxed or strict completeness
settings according to commandline switches.
-----------------------
LIST OF MODULES
-----------------------
utils.py
-----------------------
DATA
-----------------------
Input data and output data stored in:
/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/GRIDS2/
Requires 5x5 monthly grids and climatologies - either calculated from daily or monthly data
-----------------------
HOW TO RUN THE CODE
-----------------------
python2.7 apply_5x5monthly_climatology.py --suffix relax --period day --daily
python2.7 apply_5x5monthly_climatology.py --help
will show all options
-----------------------
OUTPUT
-----------------------
/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/GRIDS2/
Plots to appear in
/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/PLOTS2/
-----------------------
VERSION/RELEASE NOTES
-----------------------
Version 4 (11 Feb 2021) Kate Willett
---------
Enhancements
Changes
This now creates new actuals (rather than anomalies from Actuals-climatology) by adding the existing, read-in climatology
to the renormalised anomalies. This is desirable because we want to use the renormalised anomalies, and not the actuals - climatology, to
take advantage of the gridded anomalies that are more robust to sampling bias than the actuals. Also, by creating gridded actuals from
gridded anomalies + climatology we're reducing the effects of sampling bias from gridded actuals.
Bug fixes
Version 3 (11 May 2020) Kate Willett
---------
Enhancements
This can do doNOWHOLE - a bias corrected version with all whole number flagged data removed
Changes
Bug fixes
Version 2 (26 Sep 2016) Kate Willett
---------
Enhancements
This can now cope with the iterative approach (doQC1it, doQC2it, doQC3it in addition to doQC and doBC),
It can also cope with doBCtotal, doBChgt and doBCscn
It can also be run for ShipOnly
In reality we should only need to run it for the first iteration.
Look for # KATE modified
...
# end
Changes
I have changed the output name to include the word 'renorm19812010'. This will need to be changed if a different climatology
period is used.
Now assumes day and night are also 90 to -90 latitude for consistency with both
Bug fixes
Version 1 (release date)
---------
Enhancements
Changes
Bug fixes
-----------------------
OTHER INFORMATION
-----------------------
'''
import os
import datetime as dt
import numpy as np
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
import calendar
import netCDF4 as ncdf
import copy
import utils
import set_paths_and_vars
defaults = set_paths_and_vars.set()

# the meteorological variables (and their order) processed by this script
OBS_ORDER = utils.make_MetVars(defaults.mdi, multiplier = False)

# what size grid (lat/lon), in degrees
DELTA_LAT = 5
DELTA_LON = 5

# set up the grid
# KATE modified - flipped the lats to go 90 to -90
grid_lats = np.arange(90 - DELTA_LAT, -90 - DELTA_LAT, -DELTA_LAT)
#grid_lats = np.arange(-90 + DELTA_LAT, 90 + DELTA_LAT, DELTA_LAT)
# end
# BUG FIX: the western edge previously used DELTA_LAT for the start offset;
# harmless while both deltas are 5 degrees, but wrong for non-square grids.
grid_lons = np.arange(-180 + DELTA_LON, 180 + DELTA_LON, DELTA_LON)
# subroutine start
#*********************************************
# KATE modified
def apply_climatology(suffix = "relax", period = "both", daily = False,
                      doQC = False, doQC1it = False, doQC2it = False, doQC3it = False,
                      doBC = False, doBCtotal = False, doBChgt = False, doBCscn = False, doNOWHOLE = False,
                      ShipOnly = False):
#doQC = False, doBC = False):
# end
    '''
    Apply monthly 5x5 climatology

    :param str suffix: "relax" or "strict" criteria
    :param str period: which period to do day/night/both?
    :param bool daily: run in 1x1 daily --> 5x5 monthly data
    :param bool doQC: incorporate the QC flags or not
# KATE modified
    :param bool doQC1it: incorporate the 1st iteration QC flags or not
    :param bool doQC2it: incorporate the 2nd iteration QC flags or not
    :param bool doQC3it: incorporate the 3rd iteration QC flags or not
# end
    :param bool doBC: work on the bias corrected data
# KATE modified
    :param bool doBCtotal: work on the bias corrected data
    :param bool doBChgt: work on the height only bias corrected data
    :param bool doBCscn: work on the screen only bias corrected data
# end
    :param bool doNOWHOLE: work on the bias corrected data with all whole number flagged data removed
# KATE modified
    :param bool ShipOnly: work on the ship platform type only data
# end
    :returns:
    '''
# KATE modified
    # build the settings object (paths, output root, missing data indicator)
    settings = set_paths_and_vars.set(doBC = doBC, doBCtotal = doBCtotal, doBChgt = doBChgt, doBCscn = doBCscn, doNOWHOLE = doNOWHOLE,
                                      doQC = doQC, doQC1it = doQC1it, doQC2it = doQC2it, doQC3it = doQC3it,
                                      ShipOnly = ShipOnly)
    #settings = set_paths_and_vars.set(doBC = doBC, doQC = doQC)
# end
    # completeness criterion used by the climatology files being read
    if suffix == "relax":
        N_YEARS_PRESENT = 10 # number of years present to calculate climatology
    elif suffix == "strict":
        N_YEARS_PRESENT = 15 # number of years present to calculate climatology
    print "Do daily: {}".format(daily)
    # set filenames
    if daily:
        climfilename = settings.DATA_LOCATION + "{}_5x5_monthly_climatology_from_daily_{}_{}.nc".format(settings.OUTROOT, period, suffix)
        obsfilename = settings.DATA_LOCATION + "{}_5x5_monthly_from_daily_{}_{}.nc".format(settings.OUTROOT, period, suffix)
    else:
        climfilename = settings.DATA_LOCATION + "{}_5x5_monthly_climatology_{}_{}.nc".format(settings.OUTROOT, period, suffix)
        obsfilename = settings.DATA_LOCATION + "{}_5x5_monthly_from_daily_{}_{}.nc".format(settings.OUTROOT, period, suffix)
    # load netCDF files
    clim_file = ncdf.Dataset(climfilename,'r', format='NETCDF4')
    obs_file = ncdf.Dataset(obsfilename,'r', format='NETCDF4')
    # simple - use a list and append
    all_anoms = []
    # spin through all variables
    for v, var in enumerate(OBS_ORDER):
        print var.name
        obs = obs_file.variables[var.name][:]
        clims = clim_file.variables[var.name][:]
        # NEW BIT TO SET ACTUALS TO RENORMALISED ANOMALIES + CLIMATOLOGY
        # If we're working with the actuals:
        if ('anomalies' not in var.name):
            obsanoms = obs_file.variables[var.name+'_anomalies'][:]
            climsanoms = clim_file.variables[var.name+'_anomalies'][:]
            # NOTE(review): shape[0]/12 relies on Python 2 integer division;
            # under Python 3 this would need shape[0]//12.
            renormanomalies = obsanoms - np.tile(climsanoms, (obsanoms.shape[0]/12,1,1)) # make to same shape
            anomalies = renormanomalies + np.tile(clims, (renormanomalies.shape[0]/12,1,1)) # make to same shape
        # If we're working with the anomalies
        else:
            anomalies = obs - np.tile(clims, (obs.shape[0]/12,1,1)) # make to same shape
        all_anoms += [anomalies]
    # finished - convert list to array
    all_anoms = np.ma.array(all_anoms)
    # extract remaining information to copy across
    n_obs = obs_file.variables["n_obs"][:]
    n_grids = obs_file.variables["n_grids"][:]
    # set up the time object and axis
    intimes = obs_file.variables["time"]
    times = utils.TimeVar("time", intimes.long_name, intimes.units, intimes.standard_name)
    times.data = intimes[:]
    # write file
    if daily:
# KATE modified - added renorm19812010 to the filename
        out_filename = settings.DATA_LOCATION + settings.OUTROOT + "_5x5_monthly_renorm19812010_anomalies_from_daily_{}_{}.nc".format(period, suffix)
        #out_filename = settings.DATA_LOCATION + settings.OUTROOT + "_5x5_monthly_anomalies_from_daily_{}_{}.nc".format(period, suffix)
# end
    else:
# KATE modified - added renorm19812010 to the filename
        out_filename = settings.DATA_LOCATION + settings.OUTROOT + "_5x5_monthly_renorm19812010_anomalies_{}_{}.nc".format(period, suffix)
        #out_filename = settings.DATA_LOCATION + settings.OUTROOT + "_5x5_monthly_anomalies_{}_{}.nc".format(period, suffix)
# end
# KATE modified - only outputting 90 to -90 now and have changed grid_lats above
    utils.netcdf_write(out_filename, all_anoms, n_grids, n_obs, OBS_ORDER, grid_lats, grid_lons, times, frequency = "Y")
    #if period == "both":
    #    utils.netcdf_write(out_filename, all_anoms, n_grids, n_obs, OBS_ORDER, grid_lats[::-1], grid_lons, times, frequency = "Y")
    #else:
    #    utils.netcdf_write(out_filename, all_anoms, n_grids, n_obs, OBS_ORDER, grid_lats, grid_lons, times, frequency = "Y")
# end
    return # apply_climatology
#************************************************************************
if __name__=="__main__":

    # argparse is already imported at the top of the module; the duplicate
    # local import that used to live here has been removed.

    # set up keyword arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--suffix', dest='suffix', action='store', default = "relax",
                        help='"relax" or "strict" completeness, default = relax')
    parser.add_argument('--period', dest='period', action='store', default = "both",
                        help='which period to run for (day/night/all), default = "both"')
    parser.add_argument('--daily', dest='daily', action='store_true', default = False,
                        help='run on 1x1 daily --> 5x5 monthly data (rather than 1x1 monthly --> 5x5 monthly), default = False')
    parser.add_argument('--doQC', dest='doQC', action='store_true', default = False,
                        help='process the QC information, default = False')
# KATE modified
    parser.add_argument('--doBCtotal', dest='doBCtotal', action='store_true', default = False,
                        help='process the bias corrected data, default = False')
    parser.add_argument('--doBChgt', dest='doBChgt', action='store_true', default = False,
                        help='process the height only bias corrected data, default = False')
    parser.add_argument('--doBCscn', dest='doBCscn', action='store_true', default = False,
                        help='process the screen only bias corrected data, default = False')
# end
    parser.add_argument('--doNOWHOLE', dest='doNOWHOLE', action='store_true', default = False,
                        help='process the bias corrected data with all whole number flagged data removed, default = False')
    parser.add_argument('--doBC', dest='doBC', action='store_true', default = False,
                        help='process the bias corrected data, default = False')
# KATE modified
    parser.add_argument('--doQC1it', dest='doQC1it', action='store_true', default = False,
                        help='process the first iteration QC information, default = False')
    parser.add_argument('--doQC2it', dest='doQC2it', action='store_true', default = False,
                        help='process the second iteration QC information, default = False')
    parser.add_argument('--doQC3it', dest='doQC3it', action='store_true', default = False,
                        help='process the third iteration QC information, default = False')
# end
# KATE modified
    parser.add_argument('--ShipOnly', dest='ShipOnly', action='store_true', default = False,
                        help='process the ship platform type only data, default = False')
# end
    args = parser.parse_args()

    apply_climatology(suffix = str(args.suffix), period = str(args.period), daily = args.daily, \
# KATE modified
                      doQC = args.doQC, doQC1it = args.doQC1it, doQC2it = args.doQC2it, doQC3it = args.doQC3it,
                      doBC = args.doBC, doBCtotal = args.doBCtotal, doBCscn = args.doBCscn, doBChgt = args.doBChgt, doNOWHOLE = args.doNOWHOLE,
                      ShipOnly = args.ShipOnly)
                      #doQC = args.doQC, doBC = args.doBC)
# end
# END
# ************************************************************************
|
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake Smart Dictionary Implementation
Functions for interacting with the data structure used by the
BitBake build tools.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2004, 2005 Seb Frankengul
# Copyright (C) 2005, 2006 Holger Hans Peter Freyther
# Copyright (C) 2005 Uli Luckas
# Copyright (C) 2005 ROAD GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import copy, re, sys, traceback
from collections import MutableMapping
import logging
import hashlib
import bb, bb.codeparser
from bb import utils
from bb.COW import COWDictBase
logger = logging.getLogger("BitBake.Data")

# variable-name suffixes that trigger special append/prepend handling
__setvar_keyword__ = ["_append", "_prepend"]
# splits e.g. "VAR_append_override" into base/keyword/add groups
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?$')
# matches ${VAR} style references (no nested braces)
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
# matches ${@python_expression} inline code
__expand_python_regexp__ = re.compile(r"\${@.+?}")
def infer_caller_details(loginfo, parent = False, varval = True):
    """Save the caller the trouble of specifying everything.

    Fills in 'variable'/'detail' (from the caller's locals) and
    'file'/'line'/'func' (from the call stack) when missing from loginfo.
    Mutates loginfo in place.
    """
    # Save effort.
    if 'ignore' in loginfo and loginfo['ignore']:
        return
    # If nothing was provided, mark this as possibly unneeded.
    if not loginfo:
        loginfo['ignore'] = True
        return
    # Infer caller's likely values for variable (var) and value (value),
    # to reduce clutter in the rest of the code.
    if varval and ('variable' not in loginfo or 'detail' not in loginfo):
        try:
            raise Exception
        except Exception:
            tb = sys.exc_info()[2]
            if parent:
                above = tb.tb_frame.f_back.f_back
            else:
                above = tb.tb_frame.f_back
            lcls = above.f_locals.items()
            for k, v in lcls:
                if k == 'value' and 'detail' not in loginfo:
                    loginfo['detail'] = v
                if k == 'var' and 'variable' not in loginfo:
                    loginfo['variable'] = v
    # Infer file/line/function from traceback
    if 'file' not in loginfo:
        depth = 3
        if parent:
            depth = 4
        file, line, func, text = traceback.extract_stack(limit = depth)[0]
        loginfo['file'] = file
        loginfo['line'] = line
        # BUG FIX: this previously tested "func not in loginfo", i.e. whether
        # the function *name string* was a key - which is essentially never
        # true - so a caller-supplied 'func' was always overwritten.
        if 'func' not in loginfo:
            loginfo['func'] = func
class VariableParse:
    """Collects the variable references and python calls (execs) encountered
    while expanding a single variable's value."""

    def __init__(self, varname, d, val = None):
        # varname: name of the variable being expanded (None for anonymous
        # expansions); d: the datastore used to resolve references.
        self.varname = varname
        self.d = d
        self.value = val
        # names of datastore variables referenced during expansion
        self.references = set()
        # names of python functions executed during expansion
        self.execs = set()

    def var_sub(self, match):
        """re.sub callback: replace a ${VAR} match with its datastore value."""
        key = match.group()[2:-1]
        if self.varname and key:
            if self.varname == key:
                raise Exception("variable %s references itself!" % self.varname)
        var = self.d.getVar(key, True)
        if var is not None:
            self.references.add(key)
            return var
        else:
            # Unknown variable: leave the ${...} text in place unchanged.
            return match.group()

    def python_sub(self, match):
        """re.sub callback: evaluate a ${@...} python expression and return
        its string value, recording references and execs found by the parser."""
        code = match.group()[3:-1]
        codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")

        parser = bb.codeparser.PythonParser(self.varname, logger)
        parser.parse_python(code)
        if self.varname:
            vardeps = self.d.getVarFlag(self.varname, "vardeps", True)
            # Flush parser log messages unless vardeps was explicitly set.
            if vardeps is None:
                parser.log.flush()
        else:
            parser.log.flush()
        self.references |= parser.references
        self.execs |= parser.execs

        value = utils.better_eval(codeobj, DataContext(self.d))
        return str(value)
class DataContext(dict):
    """Globals mapping for ${@...} python expressions: unknown names fall
    back to datastore variable lookups, and 'd' is bound to the datastore."""

    def __init__(self, metadata, **kwargs):
        self.metadata = metadata
        dict.__init__(self, **kwargs)
        self['d'] = metadata

    def __missing__(self, key):
        # Resolve unknown names from the datastore; shell/python functions
        # and unset variables are treated as genuinely missing names.
        value = self.metadata.getVar(key, True)
        if value is None or self.metadata.getVarFlag(key, 'func'):
            raise KeyError(key)
        return value
class ExpansionError(Exception):
    """Raised when expanding a variable or expression fails; wraps the
    triggering exception together with the variable name and expression."""

    def __init__(self, varname, expression, exception):
        self.expression = expression
        self.variablename = varname
        self.exception = exception
        excname = type(exception).__name__
        if not varname:
            self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, excname, exception)
        elif expression:
            self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, excname, exception)
        else:
            self.msg = "Failure expanding variable %s: %s: %s" % (varname, excname, exception)
        Exception.__init__(self, self.msg)
        # keep the structured details available to callers as well
        self.args = (varname, expression, exception)

    def __str__(self):
        return self.msg
class IncludeHistory(object):
    """Tree of include/require file operations, used to emit an include
    history header into saved data files."""

    def __init__(self, parent = None, filename = '[TOP LEVEL]'):
        self.parent = parent
        self.filename = filename
        self.children = []
        # 'current' tracks the innermost include being processed.
        self.current = self

    def copy(self):
        """Return a shallow copy sharing the existing child nodes."""
        new = IncludeHistory(self.parent, self.filename)
        for c in self.children:
            new.children.append(c)
        return new

    def include(self, filename):
        """Record that 'filename' was included at the current position and
        descend into it; returns self for use as a context manager."""
        newfile = IncludeHistory(self.current, filename)
        self.current.children.append(newfile)
        self.current = newfile
        return self

    def __enter__(self):
        pass

    def __exit__(self, a, b, c):
        if self.current.parent:
            self.current = self.current.parent
        else:
            # BUG FIX: this message previously referenced an undefined local
            # 'filename', raising NameError instead of emitting the warning.
            bb.warn("Include log: Tried to finish '%s' at top level." % self.filename)
        return False

    def emit(self, o, level = 0):
        """Emit an include history file, and its children."""
        if level:
            spaces = " " * (level - 1)
            o.write("# %s%s" % (spaces, self.filename))
            if len(self.children) > 0:
                o.write(" includes:")
        else:
            o.write("#\n# INCLUDE HISTORY:\n#")
        level = level + 1
        for child in self.children:
            o.write("\n")
            child.emit(o, level)
class VariableHistory(object):
    """Records, per variable, the operations (set/append/override...) applied
    to it on a datastore, for later reporting via emit()."""

    def __init__(self, dataroot):
        # dataroot: the owning DataSmart instance; tracking is gated on
        # dataroot._tracking.
        self.dataroot = dataroot
        self.variables = COWDictBase.copy()

    def copy(self):
        new = VariableHistory(self.dataroot)
        new.variables = self.variables.copy()
        return new

    def record(self, *kwonly, **loginfo):
        """Append one loginfo event to the named variable's history.

        Keyword-only: positional arguments raise TypeError. Does nothing
        unless tracking is enabled on the datastore.
        """
        if not self.dataroot._tracking:
            return
        if len(kwonly) > 0:
            raise TypeError
        infer_caller_details(loginfo, parent = True)
        if 'ignore' in loginfo and loginfo['ignore']:
            return
        if 'op' not in loginfo or not loginfo['op']:
            loginfo['op'] = 'set'
        if 'detail' in loginfo:
            loginfo['detail'] = str(loginfo['detail'])
        if 'variable' not in loginfo or 'file' not in loginfo:
            raise ValueError("record() missing variable or file.")
        var = loginfo['variable']
        if var not in self.variables:
            self.variables[var] = []
        # copy so later mutation of loginfo cannot rewrite history
        self.variables[var].append(loginfo.copy())

    def variable(self, var):
        """Return the list of recorded events for var (empty if none)."""
        if var in self.variables:
            return self.variables[var]
        else:
            return []

    def emit(self, var, oval, val, o):
        """Write a commented history of var to stream o.

        :param oval: the value shown as the computed result (newlines are
            re-commented so the output stays a valid comment block)
        :param val: accepted for the caller's convenience but unused here
        :param o: output stream with a write() method
        """
        history = self.variable(var)
        commentVal = re.sub('\n', '\n#', str(oval))
        if history:
            if len(history) == 1:
                o.write("#\n# $%s\n" % var)
            else:
                o.write("#\n# $%s [%d operations]\n" % (var, len(history)))
            for event in history:
                # o.write("# %s\n" % str(event))
                if 'func' in event:
                    # If we have a function listed, this is internal
                    # code, not an operation in a config file, and the
                    # full path is distracting.
                    event['file'] = re.sub('.*/', '', event['file'])
                    display_func = ' [%s]' % event['func']
                else:
                    display_func = ''
                if 'flag' in event:
                    flag = '[%s] ' % (event['flag'])
                else:
                    flag = ''
                o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
            if len(history) > 1:
                o.write("# computed:\n")
            o.write('# "%s"\n' % (commentVal))
        else:
            o.write("#\n# $%s\n# [no history recorded]\n#\n" % var)
            o.write('# "%s"\n' % (commentVal))

    def get_variable_files(self, var):
        """Get the files where operations are made on a variable"""
        var_history = self.variable(var)
        files = []
        for event in var_history:
            files.append(event['file'])
        return files

    def get_variable_lines(self, var, f):
        """Get the line where a operation is made on a variable in file f"""
        var_history = self.variable(var)
        lines = []
        for event in var_history:
            if f== event['file']:
                line = event['line']
                lines.append(line)
        return lines

    def del_var_history(self, var):
        """Drop all recorded history for var."""
        if var in self.variables:
            self.variables[var] = []
class DataSmart(MutableMapping):
    def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
        # NOTE(review): the default COWDictBase.copy() calls are evaluated
        # once at definition time, so instances created without arguments
        # share the same copy-on-write bases - confirm this sharing is
        # intended before changing it.
        self.dict = {}

        self.inchistory = IncludeHistory()
        self.varhistory = VariableHistory(self)
        # when True, variable operations are recorded into varhistory
        self._tracking = False

        # cookie monster tribute
        # maps _append/_prepend keywords to the variables that use them
        self._special_values = special
        # maps override names to the variables carrying that override suffix
        self._seen_overrides = seen

        self.expand_cache = {}
    def enableTracking(self):
        """Turn on recording of variable operations into varhistory."""
        self._tracking = True

    def disableTracking(self):
        """Turn off recording of variable operations."""
        self._tracking = False
    def expandWithRefs(self, s, varname):
        """Expand ${...} references in s, returning a VariableParse carrying
        the expanded value plus the references and execs encountered.

        :param s: the value to expand (returned untouched if not a string)
        :param varname: name used for error reporting and the expand cache
        """

        if not isinstance(s, basestring): # sanity check
            return VariableParse(varname, self, s)

        if varname and varname in self.expand_cache:
            return self.expand_cache[varname]

        varparse = VariableParse(varname, self)

        while s.find('${') != -1:
            olds = s
            try:
                # substitute plain ${VAR} references, then ${@python} code
                s = __expand_var_regexp__.sub(varparse.var_sub, s)
                s = __expand_python_regexp__.sub(varparse.python_sub, s)
                if s == olds:
                    # nothing changed this pass; stop to avoid looping forever
                    break
            except ExpansionError:
                raise
            except bb.parse.SkipPackage:
                raise
            except Exception as exc:
                raise ExpansionError(varname, s, exc)

        varparse.value = s

        if varname:
            self.expand_cache[varname] = varparse

        return varparse
def expand(self, s, varname = None):
return self.expandWithRefs(s, varname).value
def finalize(self, parent = False):
"""Performs final steps upon the datastore, including application of overrides"""
overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
finalize_caller = {
'op': 'finalize',
}
infer_caller_details(finalize_caller, parent = parent, varval = False)
#
# Well let us see what breaks here. We used to iterate
# over each variable and apply the override and then
# do the line expanding.
# If we have bad luck - which we will have - the keys
# where in some order that is so important for this
# method which we don't have anymore.
# Anyway we will fix that and write test cases this
# time.
#
# First we apply all overrides
# Then we will handle _append and _prepend
#
# We only want to report finalization once per variable overridden.
finalizes_reported = {}
for o in overrides:
# calculate '_'+override
l = len(o) + 1
# see if one should even try
if o not in self._seen_overrides:
continue
vars = self._seen_overrides[o].copy()
for var in vars:
name = var[:-l]
try:
# Report only once, even if multiple changes.
if name not in finalizes_reported:
finalizes_reported[name] = True
finalize_caller['variable'] = name
finalize_caller['detail'] = 'was: ' + str(self.getVar(name, False))
self.varhistory.record(**finalize_caller)
# Copy history of the override over.
for event in self.varhistory.variable(var):
loginfo = event.copy()
loginfo['variable'] = name
loginfo['op'] = 'override[%s]:%s' % (o, loginfo['op'])
self.varhistory.record(**loginfo)
self.setVar(name, self.getVar(var, False), op = 'finalize', file = 'override[%s]' % o, line = '')
self.delVar(var)
except Exception:
logger.info("Untracked delVar")
# now on to the appends and prepends
for op in __setvar_keyword__:
if op in self._special_values:
appends = self._special_values[op] or []
for append in appends:
keep = []
for (a, o) in self.getVarFlag(append, op) or []:
match = True
if o:
for o2 in o.split("_"):
if not o2 in overrides:
match = False
if not match:
keep.append((a ,o))
continue
if op == "_append":
sval = self.getVar(append, False) or ""
sval += a
self.setVar(append, sval)
elif op == "_prepend":
sval = a + (self.getVar(append, False) or "")
self.setVar(append, sval)
# We save overrides that may be applied at some later stage
if keep:
self.setVarFlag(append, op, keep, ignore=True)
else:
self.delVarFlag(append, op, ignore=True)
def initVar(self, var):
self.expand_cache = {}
if not var in self.dict:
self.dict[var] = {}
def _findVar(self, var):
dest = self.dict
while dest:
if var in dest:
return dest[var]
if "_data" not in dest:
break
dest = dest["_data"]
def _makeShadowCopy(self, var):
if var in self.dict:
return
local_var = self._findVar(var)
if local_var:
self.dict[var] = copy.copy(local_var)
else:
self.initVar(var)
def setVar(self, var, value, **loginfo):
#print("var=" + str(var) + " val=" + str(value))
if 'op' not in loginfo:
loginfo['op'] = "set"
self.expand_cache = {}
match = __setvar_regexp__.match(var)
if match and match.group("keyword") in __setvar_keyword__:
base = match.group('base')
keyword = match.group("keyword")
override = match.group('add')
l = self.getVarFlag(base, keyword) or []
l.append([value, override])
self.setVarFlag(base, keyword, l, ignore=True)
# And cause that to be recorded:
loginfo['detail'] = value
loginfo['variable'] = base
if override:
loginfo['op'] = '%s[%s]' % (keyword, override)
else:
loginfo['op'] = keyword
self.varhistory.record(**loginfo)
# todo make sure keyword is not __doc__ or __module__
# pay the cookie monster
try:
self._special_values[keyword].add(base)
except KeyError:
self._special_values[keyword] = set()
self._special_values[keyword].add(base)
return
if not var in self.dict:
self._makeShadowCopy(var)
# more cookies for the cookie monster
if '_' in var:
override = var[var.rfind('_')+1:]
if len(override) > 0:
if override not in self._seen_overrides:
self._seen_overrides[override] = set()
self._seen_overrides[override].add( var )
# setting var
self.dict[var]["_content"] = value
self.varhistory.record(**loginfo)
def getVar(self, var, expand=False, noweakdefault=False):
value = self.getVarFlag(var, "_content", False, noweakdefault)
# Call expand() separately to make use of the expand cache
if expand and value:
return self.expand(value, var)
return value
def renameVar(self, key, newkey, **loginfo):
"""
Rename the variable key to newkey
"""
val = self.getVar(key, 0)
if val is not None:
loginfo['variable'] = newkey
loginfo['op'] = 'rename from %s' % key
loginfo['detail'] = val
self.varhistory.record(**loginfo)
self.setVar(newkey, val, ignore=True)
for i in ('_append', '_prepend'):
src = self.getVarFlag(key, i)
if src is None:
continue
dest = self.getVarFlag(newkey, i) or []
dest.extend(src)
self.setVarFlag(newkey, i, dest, ignore=True)
if i in self._special_values and key in self._special_values[i]:
self._special_values[i].remove(key)
self._special_values[i].add(newkey)
loginfo['variable'] = key
loginfo['op'] = 'rename (to)'
loginfo['detail'] = newkey
self.varhistory.record(**loginfo)
self.delVar(key, ignore=True)
def appendVar(self, var, value, **loginfo):
loginfo['op'] = 'append'
self.varhistory.record(**loginfo)
newvalue = (self.getVar(var, False) or "") + value
self.setVar(var, newvalue, ignore=True)
def prependVar(self, var, value, **loginfo):
loginfo['op'] = 'prepend'
self.varhistory.record(**loginfo)
newvalue = value + (self.getVar(var, False) or "")
self.setVar(var, newvalue, ignore=True)
def delVar(self, var, **loginfo):
loginfo['detail'] = ""
loginfo['op'] = 'del'
self.varhistory.record(**loginfo)
self.expand_cache = {}
self.dict[var] = {}
if '_' in var:
override = var[var.rfind('_')+1:]
if override and override in self._seen_overrides and var in self._seen_overrides[override]:
self._seen_overrides[override].remove(var)
def setVarFlag(self, var, flag, value, **loginfo):
if 'op' not in loginfo:
loginfo['op'] = "set"
loginfo['flag'] = flag
self.varhistory.record(**loginfo)
if not var in self.dict:
self._makeShadowCopy(var)
self.dict[var][flag] = value
def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
local_var = self._findVar(var)
value = None
if local_var:
if flag in local_var:
value = copy.copy(local_var[flag])
elif flag == "_content" and "defaultval" in local_var and not noweakdefault:
value = copy.copy(local_var["defaultval"])
if expand and value:
value = self.expand(value, None)
return value
def delVarFlag(self, var, flag, **loginfo):
local_var = self._findVar(var)
if not local_var:
return
if not var in self.dict:
self._makeShadowCopy(var)
if var in self.dict and flag in self.dict[var]:
loginfo['detail'] = ""
loginfo['op'] = 'delFlag'
loginfo['flag'] = flag
self.varhistory.record(**loginfo)
del self.dict[var][flag]
def appendVarFlag(self, var, flag, value, **loginfo):
loginfo['op'] = 'append'
loginfo['flag'] = flag
self.varhistory.record(**loginfo)
newvalue = (self.getVarFlag(var, flag, False) or "") + value
self.setVarFlag(var, flag, newvalue, ignore=True)
def prependVarFlag(self, var, flag, value, **loginfo):
loginfo['op'] = 'prepend'
loginfo['flag'] = flag
self.varhistory.record(**loginfo)
newvalue = value + (self.getVarFlag(var, flag, False) or "")
self.setVarFlag(var, flag, newvalue, ignore=True)
def setVarFlags(self, var, flags, **loginfo):
infer_caller_details(loginfo)
if not var in self.dict:
self._makeShadowCopy(var)
for i in flags:
if i == "_content":
continue
loginfo['flag'] = i
loginfo['detail'] = flags[i]
self.varhistory.record(**loginfo)
self.dict[var][i] = flags[i]
def getVarFlags(self, var):
local_var = self._findVar(var)
flags = {}
if local_var:
for i in local_var:
if i.startswith("_"):
continue
flags[i] = local_var[i]
if len(flags) == 0:
return None
return flags
def delVarFlags(self, var, **loginfo):
if not var in self.dict:
self._makeShadowCopy(var)
if var in self.dict:
content = None
loginfo['op'] = 'delete flags'
self.varhistory.record(**loginfo)
# try to save the content
if "_content" in self.dict[var]:
content = self.dict[var]["_content"]
self.dict[var] = {}
self.dict[var]["_content"] = content
else:
del self.dict[var]
def createCopy(self):
"""
Create a copy of self by setting _data to self
"""
# we really want this to be a DataSmart...
data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
data.dict["_data"] = self.dict
data.varhistory = self.varhistory.copy()
data.varhistory.datasmart = data
data.inchistory = self.inchistory.copy()
data._tracking = self._tracking
return data
def expandVarref(self, variable, parents=False):
"""Find all references to variable in the data and expand it
in place, optionally descending to parent datastores."""
if parents:
keys = iter(self)
else:
keys = self.localkeys()
ref = '${%s}' % variable
value = self.getVar(variable, False)
for key in keys:
referrervalue = self.getVar(key, False)
if referrervalue and ref in referrervalue:
self.setVar(key, referrervalue.replace(ref, value))
def localkeys(self):
for key in self.dict:
if key != '_data':
yield key
def __iter__(self):
def keylist(d):
klist = set()
for key in d:
if key == "_data":
continue
if not d[key]:
continue
klist.add(key)
if "_data" in d:
klist |= keylist(d["_data"])
return klist
for k in keylist(self.dict):
yield k
def __len__(self):
return len(frozenset(self))
def __getitem__(self, item):
value = self.getVar(item, False)
if value is None:
raise KeyError(item)
else:
return value
def __setitem__(self, var, value):
self.setVar(var, value)
def __delitem__(self, var):
self.delVar(var)
def get_hash(self):
data = {}
d = self.createCopy()
bb.data.expandKeys(d)
bb.data.update_data(d)
config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST", True) or "").split())
keys = set(key for key in iter(d) if not key.startswith("__"))
for key in keys:
if key in config_whitelist:
continue
value = d.getVar(key, False) or ""
data.update({key:value})
varflags = d.getVarFlags(key)
if not varflags:
continue
for f in varflags:
data.update({'%s[%s]' % (key, f):varflags[f]})
for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
bb_list = d.getVar(key, False) or []
bb_list.sort()
data.update({key:str(bb_list)})
if key == "__BBANONFUNCS":
for i in bb_list:
value = d.getVar(i, True) or ""
data.update({i:value})
data_str = str([(k, data[k]) for k in sorted(data.keys())])
return hashlib.md5(data_str).hexdigest()
|
|
""" Testing DKI """
from __future__ import division, print_function, absolute_import
import numpy as np
import random
import dipy.reconst.dki as dki
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_almost_equal)
from nose.tools import assert_raises
from dipy.sims.voxel import multi_tensor_dki
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.data import get_data
from dipy.reconst.dti import (from_lower_triangular, decompose_tensor)
from dipy.reconst.dki import (mean_kurtosis, carlson_rf, carlson_rd,
axial_kurtosis, radial_kurtosis, _positive_evals)
from dipy.core.sphere import Sphere
from dipy.core.geometry import perpendicular_directions
# Module-level fixtures shared by all tests below.
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)
# 2 shells for techniques that require multishell data
bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
gtab_2s = gradient_table(bvals_2s, bvecs_2s)
# Simulation 1. Signals of two crossing fibers are simulated
mevals_cross = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                         [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
angles_cross = [(80, 10), (80, 10), (20, 30), (20, 30)]
fie = 0.49  # intra-axonal water fraction
frac_cross = [fie*50, (1-fie) * 50, fie*50, (1-fie) * 50]
# Noise-free simulations
signal_cross, dt_cross, kt_cross = multi_tensor_dki(gtab_2s, mevals_cross,
                                                    S0=100,
                                                    angles=angles_cross,
                                                    fractions=frac_cross,
                                                    snr=None)
evals_cross, evecs_cross = decompose_tensor(from_lower_triangular(dt_cross))
crossing_ref = np.concatenate((evals_cross, evecs_cross[0], evecs_cross[1],
                               evecs_cross[2], kt_cross), axis=0)
# Simulation 2. Spherical kurtosis tensor - for white matter, this can be a
# biologically implausible scenario, however this simulation is useful for
# testing the estimation of directional apparent kurtosis and the mean
# kurtosis, since its directional and mean kurtosis ground truth are a constant
# which can be easily mathematically calculated.
Di = 0.00099
De = 0.00226
mevals_sph = np.array([[Di, Di, Di], [De, De, De]])
frac_sph = [50, 50]
signal_sph, dt_sph, kt_sph = multi_tensor_dki(gtab_2s, mevals_sph, S0=100,
                                              fractions=frac_sph,
                                              snr=None)
evals_sph, evecs_sph = decompose_tensor(from_lower_triangular(dt_sph))
params_sph = np.concatenate((evals_sph, evecs_sph[0], evecs_sph[1],
                             evecs_sph[2], kt_sph), axis=0)
# Compute ground truth - since KT is spherical, the apparent kurtosis
# coefficient for all gradient directions and the mean kurtosis have to be
# equal to Kref_sph.
f = 0.5
Dg = f*Di + (1-f)*De
Kref_sphere = 3 * f * (1-f) * ((Di-De) / Dg) ** 2
# Simulation 3. Multi-voxel simulations - dataset of four voxels is simulated.
# Since the objective of this simulation is to see if procedures are able to
# work with multi-dimensional data, all voxels contain the same crossing signal
# produced in simulation 1.
DWI = np.zeros((2, 2, 1, len(gtab_2s.bvals)))
DWI[0, 0, 0] = DWI[0, 1, 0] = DWI[1, 0, 0] = DWI[1, 1, 0] = signal_cross
multi_params = np.zeros((2, 2, 1, 27))
multi_params[0, 0, 0] = multi_params[0, 1, 0] = crossing_ref
multi_params[1, 0, 0] = multi_params[1, 1, 0] = crossing_ref
def test_positive_evals():
    """_positive_evals flags exactly the voxels whose three eigenvalues
    are all larger than zero."""
    ev1 = np.array([[1e-3, 1e-3, 2e-3], [0, 1e-3, 0]])
    ev2 = np.array([[3e-3, 0, 2e-3], [1e-3, 1e-3, 0]])
    ev3 = np.array([[4e-3, 1e-4, 0], [0, 1e-3, 0]])
    # Only voxels [0, 0] and [1, 1] have strictly positive L1, L2 and L3.
    expected_ind = np.array([[True, False, False], [False, True, False]],
                            dtype=bool)
    ind = _positive_evals(ev1, ev2, ev3)
    assert_array_equal(ind, expected_ind)
def test_split_dki_param():
    """split_dki_param must return the same eigenvalues, eigenvectors and
    kurtosis elements that the fit object exposes."""
    model = dki.DiffusionKurtosisModel(gtab_2s, fit_method="OLS")
    fit = model.fit(DWI)
    evals, evecs, kt = dki.split_dki_param(fit.model_params)
    assert_array_almost_equal(evals, fit.evals)
    assert_array_almost_equal(evecs, fit.evecs)
    assert_array_almost_equal(kt, fit.kt)
def test_dki_fits():
    """DKI fits are tested on noise-free crossing-fiber simulations."""
    for fit_method in ("OLS", "WLS"):
        model = dki.DiffusionKurtosisModel(gtab_2s, fit_method=fit_method)
        # single-voxel signal must recover the reference parameters
        fit_single = model.fit(signal_cross)
        assert_array_almost_equal(fit_single.model_params, crossing_ref)
        # and the same on the multi-voxel dataset
        fit_multi = model.fit(DWI)
        assert_array_almost_equal(fit_multi.model_params, multi_params)
def test_apparent_kurtosis_coef():
    """For a spherical kurtosis tensor the apparent kurtosis coefficient
    must equal the analytical constant Kref_sphere in every direction."""
    directions = gtab.bvecs[gtab.bvals > 0]
    AKC = dki.apparent_kurtosis_coef(params_sph, Sphere(xyz=directions))
    for akc_d in AKC:
        assert_array_almost_equal(akc_d, Kref_sphere)
def test_dki_predict():
    # Signal prediction from DKI parameters must reproduce the noise-free
    # simulated signals: single- and multi-voxel inputs, scalar and
    # per-voxel S0, via the model, the fit object and the module function.
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)
    pred = dkiM.predict(crossing_ref, S0=100)
    assert_array_almost_equal(pred, signal_cross)
    # just to check that it works with more than one voxel:
    pred_multi = dkiM.predict(multi_params, S0=100)
    assert_array_almost_equal(pred_multi, DWI)
    # Check that it works with more than one voxel, and with a different S0
    # in each voxel:
    pred_multi = dkiM.predict(multi_params,
                              S0=100*np.ones(pred_multi.shape[:3]))
    assert_array_almost_equal(pred_multi, DWI)
    # check the function predict of the DiffusionKurtosisFit object
    dkiF = dkiM.fit(DWI)
    pred_multi = dkiF.predict(gtab_2s, S0=100)
    assert_array_almost_equal(pred_multi, DWI)
    # round trip: fitting the predicted signal predicts the data again
    dkiF = dkiM.fit(pred_multi)
    pred_from_fit = dkiF.predict(dkiM.gtab, S0=100)
    assert_array_almost_equal(pred_from_fit, DWI)
    # Test the module function:
    pred = dki.dki_prediction(crossing_ref, gtab_2s, S0=100)
    assert_array_almost_equal(pred, signal_cross)
    # Test the module function with S0 volume:
    pred = dki.dki_prediction(multi_params, gtab_2s,
                              S0=100 * np.ones(multi_params.shape[:3]))
    assert_array_almost_equal(pred, DWI)
def test_carlson_rf():
    # Define inputs that we know the outputs from:
    # Carlson, B.C., 1994. Numerical computation of real or complex
    # elliptic integrals. arXiv:math/9409227 [math.CA]
    # Real values (test in 2D format)
    x = np.array([[1.0, 0.5], [2.0, 2.0]])
    y = np.array([[2.0, 1.0], [3.0, 3.0]])
    z = np.array([[0.0, 0.0], [4.0, 4.0]])
    # Define reference outputs
    RF_ref = np.array([[1.3110287771461, 1.8540746773014],
                       [0.58408284167715, 0.58408284167715]])
    # Compute integrals
    RF = carlson_rf(x, y, z)
    # Compare
    assert_array_almost_equal(RF, RF_ref)
    # Complex values
    x = np.array([1j, 1j - 1, 1j, 1j - 1])
    y = np.array([-1j, 1j, -1j, 1j])
    z = np.array([0.0, 0.0, 2, 1 - 1j])
    # Define reference outputs
    RF_ref = np.array([1.8540746773014, 0.79612586584234 - 1.2138566698365j,
                       1.0441445654064, 0.93912050218619 - 0.53296252018635j])
    # Compute integrals
    RF = carlson_rf(x, y, z, errtol=3e-5)
    # Compare
    assert_array_almost_equal(RF, RF_ref)
def test_carlson_rd():
    # Define inputs that we know the outputs from:
    # Carlson, B.C., 1994. Numerical computation of real or complex
    # elliptic integrals. arXiv:math/9409227 [math.CA]
    # Real values
    x = np.array([0.0, 2.0])
    y = np.array([2.0, 3.0])
    z = np.array([1.0, 4.0])
    # Define reference outputs
    RD_ref = np.array([1.7972103521034, 0.16510527294261])
    # Compute integrals
    RD = carlson_rd(x, y, z, errtol=1e-5)
    # Compare
    assert_array_almost_equal(RD, RD_ref)
    # Complex values (testing in 2D format)
    x = np.array([[1j, 0.0], [0.0, -2 - 1j]])
    y = np.array([[-1j, 1j], [1j-1, -1j]])
    z = np.array([[2.0, -1j], [1j, -1 + 1j]])
    # Define reference outputs
    RD_ref = np.array([[0.65933854154220, 1.2708196271910 + 2.7811120159521j],
                       [-1.8577235439239 - 0.96193450888839j,
                        1.8249027393704 - 1.2218475784827j]])
    # Compute integrals
    RD = carlson_rd(x, y, z, errtol=1e-5)
    # Compare
    assert_array_almost_equal(RD, RD_ref)
def test_Wrotate_single_fiber():
    # Rotate the kurtosis tensor of a single-fiber simulation to the
    # diffusion tensor diagonal and check that it equals the kurtosis
    # tensor of the same single fiber simulated directly along the x-axis.
    # Define single fiber simulation
    mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    fie = 0.49
    frac = [fie*100, (1 - fie)*100]
    # simulate single fiber not aligned to the x-axis
    # NOTE(review): angles are drawn without a fixed seed, so this test is
    # non-deterministic by design.
    theta = random.uniform(0, 180)
    phi = random.uniform(0, 320)
    angles = [(theta, phi), (theta, phi)]
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                      fractions=frac, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    kt_rotated = dki.Wrotate(kt, evecs)
    # Now coordinate system has the DT diagonal aligned to the x-axis
    # Reference simulation in which DT diagonal is directly aligned to the
    # x-axis
    angles = (90, 0), (90, 0)
    signal, dt_ref, kt_ref = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                              fractions=frac, snr=None)
    assert_array_almost_equal(kt_rotated, kt_ref)
def test_Wrotate_crossing_fibers():
    # Test 2 - simulate crossing fibers intersecting at 70 degrees.
    # In this case, diffusion tensor principal eigenvector will be aligned in
    # the middle of the crossing fibers. Thus, after rotating the kurtosis
    # tensor, this will be equal to a kurtosis tensor simulated with crossing
    # fibers both deviating 35 degrees from the x-axis. Moreover, we know that
    # crossing fibers will be aligned to the x-y plane, because the smaller
    # diffusion eigenvalue, perpendicular to both crossings fibers, will be
    # aligned to the z-axis.
    # Simulate the crossing fiber
    angles = [(90, 30), (90, 30), (20, 30), (20, 30)]
    fie = 0.49
    frac = [fie*50, (1-fie) * 50, fie*50, (1-fie) * 50]
    mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                       [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                      fractions=frac, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    kt_rotated = dki.Wrotate(kt, evecs)
    # Now coordinate system has diffusion tensor diagonal aligned to the x-axis
    # Simulate the reference kurtosis tensor
    angles = [(90, 35), (90, 35), (90, -35), (90, -35)]
    signal, dt, kt_ref = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                          fractions=frac, snr=None)
    # Compare rotated with the reference
    assert_array_almost_equal(kt_rotated, kt_ref)
def test_Wcons():
    # Construct the 4D kurtosis tensor manually from the crossing fiber kt
    # simulation and compare with the output of dki.Wcons, which expands the
    # 15 independent kurtosis elements into the fully symmetric 3x3x3x3 form.
    Wfit = np.zeros([3, 3, 3, 3])
    # Wxxxx
    Wfit[0, 0, 0, 0] = kt_cross[0]
    # Wyyyy
    Wfit[1, 1, 1, 1] = kt_cross[1]
    # Wzzzz
    Wfit[2, 2, 2, 2] = kt_cross[2]
    # Wxxxy
    Wfit[0, 0, 0, 1] = Wfit[0, 0, 1, 0] = Wfit[0, 1, 0, 0] = kt_cross[3]
    Wfit[1, 0, 0, 0] = kt_cross[3]
    # Wxxxz
    Wfit[0, 0, 0, 2] = Wfit[0, 0, 2, 0] = Wfit[0, 2, 0, 0] = kt_cross[4]
    Wfit[2, 0, 0, 0] = kt_cross[4]
    # Wxyyy
    Wfit[0, 1, 1, 1] = Wfit[1, 0, 1, 1] = Wfit[1, 1, 1, 0] = kt_cross[5]
    Wfit[1, 1, 0, 1] = kt_cross[5]
    # Wyyyz
    Wfit[1, 1, 1, 2] = Wfit[1, 2, 1, 1] = Wfit[2, 1, 1, 1] = kt_cross[6]
    Wfit[1, 1, 2, 1] = kt_cross[6]
    # Wxzzz
    Wfit[0, 2, 2, 2] = Wfit[2, 2, 2, 0] = Wfit[2, 0, 2, 2] = kt_cross[7]
    Wfit[2, 2, 0, 2] = kt_cross[7]
    # Wyzzz
    Wfit[1, 2, 2, 2] = Wfit[2, 2, 2, 1] = Wfit[2, 1, 2, 2] = kt_cross[8]
    Wfit[2, 2, 1, 2] = kt_cross[8]
    # Wxxyy
    Wfit[0, 0, 1, 1] = Wfit[0, 1, 0, 1] = Wfit[0, 1, 1, 0] = kt_cross[9]
    Wfit[1, 0, 0, 1] = Wfit[1, 0, 1, 0] = Wfit[1, 1, 0, 0] = kt_cross[9]
    # Wxxzz
    Wfit[0, 0, 2, 2] = Wfit[0, 2, 0, 2] = Wfit[0, 2, 2, 0] = kt_cross[10]
    Wfit[2, 0, 0, 2] = Wfit[2, 0, 2, 0] = Wfit[2, 2, 0, 0] = kt_cross[10]
    # Wyyzz
    Wfit[1, 1, 2, 2] = Wfit[1, 2, 1, 2] = Wfit[1, 2, 2, 1] = kt_cross[11]
    Wfit[2, 1, 1, 2] = Wfit[2, 2, 1, 1] = Wfit[2, 1, 2, 1] = kt_cross[11]
    # Wxxyz
    Wfit[0, 0, 1, 2] = Wfit[0, 0, 2, 1] = Wfit[0, 1, 0, 2] = kt_cross[12]
    Wfit[0, 1, 2, 0] = Wfit[0, 2, 0, 1] = Wfit[0, 2, 1, 0] = kt_cross[12]
    Wfit[1, 0, 0, 2] = Wfit[1, 0, 2, 0] = Wfit[1, 2, 0, 0] = kt_cross[12]
    Wfit[2, 0, 0, 1] = Wfit[2, 0, 1, 0] = Wfit[2, 1, 0, 0] = kt_cross[12]
    # Wxyyz
    Wfit[0, 1, 1, 2] = Wfit[0, 1, 2, 1] = Wfit[0, 2, 1, 1] = kt_cross[13]
    Wfit[1, 0, 1, 2] = Wfit[1, 1, 0, 2] = Wfit[1, 1, 2, 0] = kt_cross[13]
    Wfit[1, 2, 0, 1] = Wfit[1, 2, 1, 0] = Wfit[2, 0, 1, 1] = kt_cross[13]
    Wfit[2, 1, 0, 1] = Wfit[2, 1, 1, 0] = Wfit[1, 0, 2, 1] = kt_cross[13]
    # Wxyzz
    Wfit[0, 1, 2, 2] = Wfit[0, 2, 1, 2] = Wfit[0, 2, 2, 1] = kt_cross[14]
    Wfit[1, 0, 2, 2] = Wfit[1, 2, 0, 2] = Wfit[1, 2, 2, 0] = kt_cross[14]
    Wfit[2, 0, 1, 2] = Wfit[2, 0, 2, 1] = Wfit[2, 1, 0, 2] = kt_cross[14]
    Wfit[2, 1, 2, 0] = Wfit[2, 2, 0, 1] = Wfit[2, 2, 1, 0] = kt_cross[14]
    # Function to be tested
    W4D = dki.Wcons(kt_cross)
    Wfit = Wfit.reshape(-1)
    W4D = W4D.reshape(-1)
    assert_array_almost_equal(W4D, Wfit)
def test_spherical_dki_statistics():
    # tests if MK, AK and RK are equal to expected values of a spherical
    # kurtosis tensor
    # Define multi-voxel spherical kurtosis simulations
    MParam = np.zeros((2, 2, 2, 27))
    MParam[0, 0, 0] = MParam[0, 0, 1] = MParam[0, 1, 0] = params_sph
    MParam[0, 1, 1] = MParam[1, 1, 0] = params_sph
    # MParam[1, 1, 1], MParam[1, 0, 0], and MParam[1, 0, 1] remains zero
    MRef = np.zeros((2, 2, 2))
    MRef[0, 0, 0] = MRef[0, 0, 1] = MRef[0, 1, 0] = Kref_sphere
    MRef[0, 1, 1] = MRef[1, 1, 0] = Kref_sphere
    MRef[1, 1, 1] = MRef[1, 0, 0] = MRef[1, 0, 1] = 0
    # Mean kurtosis analytical solution
    MK_multi = mean_kurtosis(MParam)
    assert_array_almost_equal(MK_multi, MRef)
    # radial kurtosis analytical solution
    RK_multi = radial_kurtosis(MParam)
    assert_array_almost_equal(RK_multi, MRef)
    # axial kurtosis analytical solution
    AK_multi = axial_kurtosis(MParam)
    assert_array_almost_equal(AK_multi, MRef)
def test_compare_MK_method():
    """Analytical MK must agree (to 1 decimal) with the average of
    directional kurtosis values sampled over the acquisition sphere."""
    fit = dki.DiffusionKurtosisModel(gtab_2s).fit(signal_cross)
    # analytical solution
    MK_as = fit.mk()
    # numerical estimate: mean AKC over the non-b0 gradient directions
    sampling = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
    MK_nm = dki.apparent_kurtosis_coef(fit.model_params,
                                       sampling).mean(axis=-1)
    assert_array_almost_equal(MK_as, MK_nm, decimal=1)
def test_single_voxel_DKI_stats():
    # tests if AK and RK are equal to expected values for a single fiber
    # simulated with a random orientation
    ADi = 0.00099
    ADe = 0.00226
    RDi = 0
    RDe = 0.00087
    # Reference values
    AD = fie*ADi + (1-fie)*ADe
    AK = 3 * fie * (1-fie) * ((ADi-ADe) / AD) ** 2
    RD = fie*RDi + (1-fie)*RDe
    RK = 3 * fie * (1-fie) * ((RDi-RDe) / RD) ** 2
    ref_vals = np.array([AD, AK, RD, RK])
    # simulate fiber randomly oriented (no fixed seed: non-deterministic)
    theta = random.uniform(0, 180)
    phi = random.uniform(0, 320)
    angles = [(theta, phi), (theta, phi)]
    mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
    frac = [fie*100, (1-fie)*100]
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, S0=100, angles=angles,
                                      fractions=frac, snr=None)
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    dki_par = np.concatenate((evals, evecs[0], evecs[1], evecs[2], kt), axis=0)
    # Estimates using dki functions
    ADe1 = dki.axial_diffusivity(evals)
    RDe1 = dki.radial_diffusivity(evals)
    AKe1 = axial_kurtosis(dki_par)
    RKe1 = radial_kurtosis(dki_par)
    e1_vals = np.array([ADe1, AKe1, RDe1, RKe1])
    assert_array_almost_equal(e1_vals, ref_vals)
    # Estimates using the kurtosis class object
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)
    dkiF = dkiM.fit(signal)
    e2_vals = np.array([dkiF.ad, dkiF.ak(), dkiF.rd, dkiF.rk()])
    assert_array_almost_equal(e2_vals, ref_vals)
    # test MK (note this test corresponds to the MK singularity L2==L3)
    MK_as = dkiF.mk()
    sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
    MK_nm = np.mean(dkiF.akc(sph))
    assert_array_almost_equal(MK_as, MK_nm, decimal=1)
def test_compare_RK_methods():
    """Analytical RK must match the mean apparent kurtosis over directions
    perpendicular to the first diffusion axis."""
    fit = dki.DiffusionKurtosisModel(gtab_2s).fit(signal_cross)
    # analytical solution
    RK_as = fit.rk()
    # numerical estimate over perpendicular directions
    p_dir = perpendicular_directions(fit.evecs[:, 0], num=30, half=True)
    RK_nm = dki.apparent_kurtosis_coef(fit.model_params,
                                       Sphere(xyz=p_dir)).mean(axis=-1)
    assert_array_almost_equal(RK_as, RK_nm)
def test_MK_singularities():
    # To test MK in case that analytical solution was a singularity not covered
    # by other tests
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)
    # test singularity L1 == L2 - this is the case of a prolate diffusion
    # tensor for crossing fibers at 90 degrees
    angles_all = np.array([[(90, 0), (90, 0), (0, 0), (0, 0)],
                           [(89.9, 0), (89.9, 0), (0, 0), (0, 0)]])
    for angles_90 in angles_all:
        s_90, dt_90, kt_90 = multi_tensor_dki(gtab_2s, mevals_cross, S0=100,
                                              angles=angles_90,
                                              fractions=frac_cross, snr=None)
        dkiF = dkiM.fit(s_90)
        MK = dkiF.mk()
        sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])
        MK_nm = np.mean(dkiF.akc(sph))
        assert_almost_equal(MK, MK_nm, decimal=2)
        # test singularity L1 == L3 and L1 != L2
        # since L1 is defined as the largest eigenvalue and L3 the smallest
        # eigenvalue, this singularity theoretically will never be called,
        # because for L1 == L3, L2 also has to equal L1 and L3.
        # Nevertheless, I decided to include this test since this singularity
        # is relevant for cases that eigenvalues are not ordered
        # artificially revert the eigenvalue and eigenvector order
        dki_params = dkiF.model_params.copy()
        dki_params[1] = dkiF.model_params[2]
        dki_params[2] = dkiF.model_params[1]
        dki_params[4] = dkiF.model_params[5]
        dki_params[5] = dkiF.model_params[4]
        dki_params[7] = dkiF.model_params[8]
        dki_params[8] = dkiF.model_params[7]
        dki_params[10] = dkiF.model_params[11]
        dki_params[11] = dkiF.model_params[10]
        MK = dki.mean_kurtosis(dki_params)
        MK_nm = np.mean(dki.apparent_kurtosis_coef(dki_params, sph))
        assert_almost_equal(MK, MK_nm, decimal=2)
def test_dki_errors():
    """Error handling of the DKI module and masked fitting."""
    # first error of DKI module is if an unknown fit method is given
    assert_raises(ValueError, dki.DiffusionKurtosisModel, gtab_2s,
                  fit_method="JOANA")
    # second error of DKI module is if a min_signal is defined as negative
    assert_raises(ValueError, dki.DiffusionKurtosisModel, gtab_2s,
                  min_signal=-1)
    # try case with correct min_signal
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, min_signal=1)
    dkiF = dkiM.fit(DWI)
    assert_array_almost_equal(dkiF.model_params, multi_params)
    # third error is if a given mask does not have the same shape as data
    dkiM = dki.DiffusionKurtosisModel(gtab_2s)
    # test a correct mask
    dkiF = dkiM.fit(DWI)
    mask_correct = dkiF.fa > 0
    mask_correct[1, 1] = False
    # BUGFIX: build the expected parameters on a local copy instead of
    # mutating the module-level fixture `multi_params`, which made the
    # outcome of other tests depend on execution order.
    expected_params = multi_params.copy()
    expected_params[1, 1] = np.zeros(27)
    mask_not_correct = np.array([[True, True, False], [True, False, False]])
    dkiF = dkiM.fit(DWI, mask=mask_correct)
    assert_array_almost_equal(dkiF.model_params, expected_params)
    # test an incorrect mask
    assert_raises(ValueError, dkiM.fit, DWI, mask=mask_not_correct)
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import sparse
from ..externals import six
from .fixes import safe_copy
class DataConversionWarning(UserWarning):
    """Warning raised on implicit data conversions happening in the code."""

# Always surface data-conversion warnings so silent dtype/shape changes
# are visible to users.
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
    """Warning raised on implicit dispatch to numpy.dot."""

# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Array contains NaN or infinity.")
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # Sparse matrices keep their values in .data; dense arrays are checked
    # directly. _assert_all_finite tries an O(n)-time, O(1)-space sum test
    # first and falls back to np.isfinite to avoid overflow false positives.
    values = X.data if sparse.issparse(X) else X
    _assert_all_finite(values)
def safe_asarray(X, dtype=None, order=None, copy=False):
    """Convert X to an array or sparse matrix.

    Prevents copying X when possible; sparse matrices are passed through."""
    if not sparse.issparse(X):
        X = np.array(X, dtype=dtype, order=order, copy=copy)
        assert_all_finite(X)
        return X
    if copy:
        X = X.copy()
    assert_all_finite(X.data)
    # enforce dtype on the data array (order is kept as-is)
    X.data = np.asarray(X.data, dtype=dtype)
    return X
def as_float_array(X, copy=True):
    """Converts an array-like to an array of floats.

    The new dtype will be np.float32 or np.float64, depending on the
    original type. The function can create a copy or modify the argument
    depending on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}
    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    # Matrices and generic array-likes go through the generic converter.
    not_array = not isinstance(X, np.ndarray) and not sparse.issparse(X)
    if isinstance(X, np.matrix) or not_array:
        return safe_asarray(X, dtype=np.float64, copy=copy)
    if X.dtype in [np.float32, np.float64]:
        if sparse.issparse(X):
            return X.copy() if copy else X
        if not copy:
            return X
        # preserve memory layout when copying a dense float array
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')
    # integral/other dtypes: int32 promotes to float32, everything else to
    # float64 (astype always copies)
    return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def array2d(X, dtype=None, order=None, copy=False, force_all_finite=True):
    """Returns at least 2-d array with data from X"""
    if sparse.issparse(X):
        raise TypeError('A sparse matrix was passed, but dense data '
                        'is required. Use X.toarray() to convert to dense.')
    result = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
    if force_all_finite:
        _assert_all_finite(result)
    # np.asarray may hand back X itself; honour the copy request in that case
    if result is X and copy:
        result = safe_copy(result)
    return result
def _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod,
force_all_finite):
if sparse.issparse(X):
if dtype is None or X.dtype == dtype:
X = getattr(X, convmethod)()
else:
X = sparse_class(X, dtype=dtype)
if force_all_finite:
_assert_all_finite(X.data)
X.data = np.array(X.data, copy=False, order=order)
else:
X = array2d(X, dtype=dtype, order=order, copy=copy,
force_all_finite=force_all_finite)
return X
def atleast2d_or_csc(X, dtype=None, order=None, copy=False,
                     force_all_finite=True):
    """Like numpy.atleast_2d, converting sparse input to CSC format.

    Dense input (including np.matrix) comes back as an np.ndarray.
    """
    checked = _atleast2d_or_sparse(X, dtype, order, copy,
                                   sparse.csc_matrix, "tocsc",
                                   force_all_finite)
    return checked
def atleast2d_or_csr(X, dtype=None, order=None, copy=False,
                     force_all_finite=True):
    """Like numpy.atleast_2d, converting sparse input to CSR format.

    Dense input (including np.matrix) comes back as an np.ndarray.
    """
    checked = _atleast2d_or_sparse(X, dtype, order, copy,
                                   sparse.csr_matrix, "tocsr",
                                   force_all_finite)
    return checked
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_arrays(*arrays, **options):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.
    By default lists and tuples are converted to numpy arrays.
    It is possible to enforce certain properties, such as dtype, continguity
    and sparse matrix format (if a sparse matrix is passed).
    Converting lists to arrays can be disabled by setting ``allow_lists=True``.
    Lists can then contain arbitrary objects and are not checked for dtype,
    finiteness or anything else but length. Arrays are still checked
    and possibly converted.

    Parameters
    ----------
    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
        Python lists or tuples occurring in arrays are converted to 1D numpy
        arrays, unless allow_lists is specified.
    sparse_format : 'csr', 'csc' or 'dense', None by default
        If not None, any scipy.sparse matrix is converted to
        Compressed Sparse Rows or Compressed Sparse Columns representations.
        If 'dense', an error is raised when a sparse array is
        passed.
    copy : boolean, False by default
        If copy is True, ensure that returned arrays are copies of the original
        (if not already converted to another format earlier in the process).
    check_ccontiguous : boolean, False by default
        Check that the arrays are C contiguous
    dtype : a numpy dtype instance, None by default
        Enforce a specific dtype.
    allow_lists : bool
        Allow lists of arbitrary objects as input, just check their length.
        Disables dtype, finiteness and contiguity checks (and conversion to
        an ndarray) for inputs without a ``shape`` attribute.

    Returns
    -------
    list of checked (and possibly converted) arrays, or None when called
    with no arrays. ``None`` entries are passed through unchanged.
    """
    # Pop all recognised options first so anything left over can be
    # reported as an unexpected keyword argument.
    sparse_format = options.pop('sparse_format', None)
    if sparse_format not in (None, 'csr', 'csc', 'dense'):
        raise ValueError('Unexpected sparse format: %r' % sparse_format)
    copy = options.pop('copy', False)
    check_ccontiguous = options.pop('check_ccontiguous', False)
    dtype = options.pop('dtype', None)
    allow_lists = options.pop('allow_lists', False)
    if options:
        raise TypeError("Unexpected keyword arguments: %r" % options.keys())
    if len(arrays) == 0:
        return None
    # The first array defines the expected number of samples.
    n_samples = _num_samples(arrays[0])
    checked_arrays = []
    for array in arrays:
        # Remember the original object so we can tell whether a conversion
        # already produced a new object (making an extra copy unnecessary).
        array_orig = array
        if array is None:
            # special case: ignore optional y=None kwarg pattern
            checked_arrays.append(array)
            continue
        size = _num_samples(array)
        if size != n_samples:
            raise ValueError("Found array with dim %d. Expected %d"
                             % (size, n_samples))
        # Lists are only skipped when allow_lists is set AND the object has
        # no shape attribute; real arrays are always checked/converted.
        if not allow_lists or hasattr(array, "shape"):
            if sparse.issparse(array):
                if sparse_format == 'csr':
                    array = array.tocsr()
                elif sparse_format == 'csc':
                    array = array.tocsc()
                elif sparse_format == 'dense':
                    raise TypeError('A sparse matrix was passed, but dense '
                                    'data is required. Use X.toarray() to '
                                    'convert to a dense numpy array.')
                if check_ccontiguous:
                    array.data = np.ascontiguousarray(array.data, dtype=dtype)
                else:
                    array.data = np.asarray(array.data, dtype=dtype)
                _assert_all_finite(array.data)
            else:
                if check_ccontiguous:
                    array = np.ascontiguousarray(array, dtype=dtype)
                else:
                    array = np.asarray(array, dtype=dtype)
                _assert_all_finite(array)
        # Only copy when no conversion above already created a new object.
        if copy and array is array_orig:
            array = array.copy()
        checked_arrays.append(array)
    return checked_arrays
def column_or_1d(y, warn=False):
    """Ravel a 1d array or a single-column 2d array; raise otherwise.

    Parameters
    ----------
    y : array-like
    warn : bool, optional
        When True, emit a DataConversionWarning for column-vector input.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    if len(shape) == 2 and shape[1] == 1:
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    if len(shape) == 1:
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def warn_if_not_float(X, estimator='This algorithm'):
    """Warn when ``X.dtype`` is not floating point.

    Returns True if a warning was raised (i.e. the input is not float) and
    False otherwise, for easier input validation.
    """
    # Accept either an estimator instance or a plain name string.
    if not isinstance(estimator, six.string_types):
        estimator = estimator.__class__.__name__
    if X.dtype.kind == 'f':
        return False
    warnings.warn("%s assumes floating point values as input, "
                  "got %s" % (estimator, X.dtype))
    return True
def check_random_state(seed):
    """Turn *seed* into a ``np.random.RandomState`` instance.

    None (or the ``np.random`` module itself) yields the global singleton,
    an integer seeds a fresh RandomState, and an existing RandomState is
    returned untouched. Anything else raises ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
|
|
import asyncio
import os
import socket
import stat
import sys
import warnings
from argparse import ArgumentParser
# Iterable/MutableMapping live in collections.abc (since Python 3.3);
# importing them from `collections` is deprecated and removed in 3.10.
from collections.abc import Iterable, MutableMapping
from importlib import import_module
from yarl import URL
from . import (hdrs, web_exceptions, web_fileresponse, web_middlewares,
               web_protocol, web_request, web_response, web_server,
               web_urldispatcher, web_ws)
from .abc import AbstractMatchInfo, AbstractRouter
from .helpers import FrozenList
from .http import HttpVersion  # noqa
from .log import access_logger, web_logger
from .signals import FuncSignal, PostSignal, PreSignal, Signal
from .web_exceptions import *  # noqa
from .web_fileresponse import *  # noqa
from .web_middlewares import *  # noqa
from .web_protocol import *  # noqa
from .web_request import *  # noqa
from .web_response import *  # noqa
from .web_server import Server
from .web_urldispatcher import *  # noqa
from .web_urldispatcher import PrefixedSubAppResource
from .web_ws import *  # noqa
# Public API: re-export the names from every web_* submodule plus the
# names defined in this module.
__all__ = (web_protocol.__all__ +
           web_fileresponse.__all__ +
           web_request.__all__ +
           web_response.__all__ +
           web_exceptions.__all__ +
           web_urldispatcher.__all__ +
           web_ws.__all__ +
           web_server.__all__ +
           web_middlewares.__all__ +
           ('Application', 'HttpVersion', 'MsgType'))
class Application(MutableMapping):
    """Web application container: router, middleware chain, signals, state.

    The instance itself acts as a mutable mapping that holds arbitrary
    application-level state (shared resources, configuration, ...).
    """

    def __init__(self, *, logger=web_logger, router=None, middlewares=(),
                 handler_args=None, client_max_size=1024**2,
                 loop=None, debug=...):
        if router is None:
            router = web_urldispatcher.UrlDispatcher()
        assert isinstance(router, AbstractRouter), router
        if loop is not None:
            warnings.warn("loop argument is deprecated", ResourceWarning)
        # `...` (Ellipsis) means "take the debug flag from the loop later",
        # resolved in _set_loop().
        self._debug = debug
        self._router = router
        self._secure_proxy_ssl_header = None
        self._loop = loop
        self._handler_args = handler_args
        self.logger = logger
        self._middlewares = FrozenList(middlewares)
        self._state = {}
        self._frozen = False
        self._subapps = []
        self._on_pre_signal = PreSignal()
        self._on_post_signal = PostSignal()
        self._on_loop_available = FuncSignal(self)
        self._on_response_prepare = Signal(self)
        self._on_startup = Signal(self)
        self._on_shutdown = Signal(self)
        self._on_cleanup = Signal(self)
        self._client_max_size = client_max_size

    # MutableMapping API
    def __getitem__(self, key):
        return self._state[key]

    def _check_frozen(self):
        # Mutating state after freeze() still works, but is deprecated.
        if self._frozen:
            warnings.warn("Changing state of started or joined "
                          "application is deprecated",
                          DeprecationWarning,
                          stacklevel=3)

    def __setitem__(self, key, value):
        self._check_frozen()
        self._state[key] = value

    def __delitem__(self, key):
        self._check_frozen()
        del self._state[key]

    def __len__(self):
        return len(self._state)

    def __iter__(self):
        return iter(self._state)

    ########
    @property
    def loop(self):
        return self._loop

    def _set_loop(self, loop):
        """Attach the event loop, propagating it to all sub-applications."""
        if loop is None:
            loop = asyncio.get_event_loop()
        if self._loop is not None and self._loop is not loop:
            raise RuntimeError(
                "web.Application instance initialized with different loop")
        self._loop = loop
        self._on_loop_available.send(self)
        # set loop debug
        if self._debug is ...:
            self._debug = loop.get_debug()
        # set loop to sub applications
        for subapp in self._subapps:
            subapp._set_loop(loop)

    @property
    def frozen(self):
        return self._frozen

    def freeze(self):
        """Make router, middlewares and signals immutable (idempotent)."""
        if self._frozen:
            return
        self._frozen = True
        # Middlewares are applied innermost-first at request time; reverse
        # once here instead of on every request.
        self._middlewares = tuple(reversed(self._middlewares))
        self._router.freeze()
        self._on_loop_available.freeze()
        self._on_pre_signal.freeze()
        self._on_post_signal.freeze()
        self._on_response_prepare.freeze()
        self._on_startup.freeze()
        self._on_shutdown.freeze()
        self._on_cleanup.freeze()
        for subapp in self._subapps:
            subapp.freeze()

    @property
    def debug(self):
        return self._debug

    def _reg_subapp_signals(self, subapp):
        """Forward this app's lifecycle signals to *subapp*."""
        def reg_handler(signame):
            subsig = getattr(subapp, signame)

            @asyncio.coroutine
            def handler(app):
                yield from subsig.send(subapp)
            appsig = getattr(self, signame)
            appsig.append(handler)
        reg_handler('on_startup')
        reg_handler('on_shutdown')
        reg_handler('on_cleanup')

    def add_subapp(self, prefix, subapp):
        """Mount *subapp* under *prefix*; return the created resource."""
        if self.frozen:
            raise RuntimeError(
                "Cannot add sub application to frozen application")
        if subapp.frozen:
            raise RuntimeError("Cannot add frozen application")
        if prefix.endswith('/'):
            prefix = prefix[:-1]
        if prefix in ('', '/'):
            raise ValueError("Prefix cannot be empty")
        resource = PrefixedSubAppResource(prefix, subapp)
        self.router.register_resource(resource)
        self._reg_subapp_signals(subapp)
        self._subapps.append(subapp)
        if self._loop is not None:
            subapp._set_loop(self._loop)
        return resource

    @property
    def on_loop_available(self):
        return self._on_loop_available

    @property
    def on_response_prepare(self):
        return self._on_response_prepare

    @property
    def on_pre_signal(self):
        return self._on_pre_signal

    @property
    def on_post_signal(self):
        return self._on_post_signal

    @property
    def on_startup(self):
        return self._on_startup

    @property
    def on_shutdown(self):
        return self._on_shutdown

    @property
    def on_cleanup(self):
        return self._on_cleanup

    @property
    def router(self):
        return self._router

    @property
    def middlewares(self):
        return self._middlewares

    def make_handler(self, *, loop=None,
                     secure_proxy_ssl_header=None, **kwargs):
        """Freeze the app and return a protocol factory for serving it."""
        self._set_loop(loop)
        self.freeze()
        kwargs['debug'] = self.debug
        if self._handler_args:
            for k, v in self._handler_args.items():
                kwargs[k] = v
        self._secure_proxy_ssl_header = secure_proxy_ssl_header
        return Server(self._handle, request_factory=self._make_request,
                      loop=self.loop, **kwargs)

    @asyncio.coroutine
    def startup(self):
        """Causes on_startup signal
        Should be called in the event loop along with the request handler.
        """
        yield from self.on_startup.send(self)

    @asyncio.coroutine
    def shutdown(self):
        """Causes on_shutdown signal
        Should be called before cleanup()
        """
        yield from self.on_shutdown.send(self)

    @asyncio.coroutine
    def cleanup(self):
        """Causes on_cleanup signal
        Should be called after shutdown()
        """
        yield from self.on_cleanup.send(self)

    def _make_request(self, message, payload, protocol, writer, task,
                      _cls=web_request.Request):
        return _cls(
            message, payload, protocol, writer, protocol._time_service, task,
            secure_proxy_ssl_header=self._secure_proxy_ssl_header,
            client_max_size=self._client_max_size)

    @asyncio.coroutine
    def _handle(self, request):
        """Resolve *request* via the router and run the middleware chain."""
        match_info = yield from self._router.resolve(request)
        assert isinstance(match_info, AbstractMatchInfo), match_info
        match_info.add_app(self)
        if __debug__:
            match_info.freeze()
        resp = None
        request._match_info = match_info
        expect = request.headers.get(hdrs.EXPECT)
        if expect:
            resp = (
                yield from match_info.expect_handler(request))
        if resp is None:
            handler = match_info.handler
            for app in match_info.apps:
                for factory in app._middlewares:
                    handler = yield from factory(app, handler)
            resp = yield from handler(request)
        # BUGFIX: the comprehension previously ran `for middleware in
        # app.middlewares` first, so `app` was the variable leaked from the
        # loop above (or unbound when the expect handler produced the
        # response). Iterate the apps first so `app` is always bound.
        assert isinstance(resp, web_response.StreamResponse), \
            ("Handler {!r} should return response instance, "
             "got {!r} [middlewares {!r}]").format(
                 match_info.handler, type(resp),
                 [middleware
                  for app in match_info.apps
                  for middleware in app.middlewares])
        return resp

    def __call__(self):
        """gunicorn compatibility"""
        return self

    def __repr__(self):
        return "<Application 0x{:x}>".format(id(self))
def run_app(app, *, host=None, port=None, path=None, sock=None,
            shutdown_timeout=60.0, ssl_context=None,
            print=print, backlog=128, access_log_format=None,
            access_log=access_logger, loop=None):
    """Run an application locally, blocking until interrupted.

    Binds any combination of TCP hosts, Unix socket paths and pre-made
    sockets, runs the loop forever, and on exit shuts down servers, the
    handler and the app (shutdown -> handler shutdown -> cleanup) before
    closing the loop.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    make_handler_kwargs = dict()
    if access_log_format is not None:
        make_handler_kwargs['access_log_format'] = access_log_format
    handler = app.make_handler(loop=loop, access_log=access_log,
                               **make_handler_kwargs)
    loop.run_until_complete(app.startup())
    scheme = 'https' if ssl_context else 'http'
    base_url = URL('{}://localhost'.format(scheme)).with_port(port)
    # Normalize path/sock/host arguments to tuples: a single value (or a
    # string-like, which is itself iterable) becomes a one-element tuple.
    if path is None:
        paths = ()
    elif isinstance(path, (str, bytes, bytearray, memoryview))\
            or not isinstance(path, Iterable):
        paths = (path,)
    else:
        paths = path
    if sock is None:
        socks = ()
    elif not isinstance(sock, Iterable):
        socks = (sock,)
    else:
        socks = sock
    if host is None:
        # Only default to 0.0.0.0 when no other binding was requested
        # (or when a port was given explicitly).
        if (paths or socks) and not port:
            hosts = ()
        else:
            hosts = ("0.0.0.0",)
    elif isinstance(host, (str, bytes, bytearray, memoryview))\
            or not isinstance(host, Iterable):
        hosts = (host,)
    else:
        hosts = host
    if hosts and port is None:
        port = 8443 if ssl_context else 8080
    server_creations = []
    uris = [str(base_url.with_host(host)) for host in hosts]
    if hosts:
        # Multiple hosts bound to same server is available in most loop
        # implementations, but only send multiple if we have multiple.
        host_binding = hosts[0] if len(hosts) == 1 else hosts
        server_creations.append(
            loop.create_server(
                handler, host_binding, port, ssl=ssl_context, backlog=backlog
            )
        )
    for path in paths:
        # Most loop implementations don't support multiple paths bound in same
        # server, so create a server for each.
        server_creations.append(
            loop.create_unix_server(
                handler, path, ssl=ssl_context, backlog=backlog
            )
        )
        uris.append('{}://unix:{}:'.format(scheme, path))
        # Clean up prior socket path if stale and not abstract.
        # CPython 3.5.3+'s event loop already does this. See
        # https://github.com/python/asyncio/issues/425
        if path[0] not in (0, '\x00'):  # pragma: no branch
            try:
                if stat.S_ISSOCK(os.stat(path).st_mode):
                    os.remove(path)
            except FileNotFoundError:
                pass
    for sock in socks:
        server_creations.append(
            loop.create_server(
                handler, sock=sock, ssl=ssl_context, backlog=backlog
            )
        )
        if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:
            uris.append('{}://unix:{}:'.format(scheme, sock.getsockname()))
        else:
            host, port = sock.getsockname()
            uris.append(str(base_url.with_host(host).with_port(port)))
    # Start all servers concurrently before announcing the bound URIs.
    servers = loop.run_until_complete(
        asyncio.gather(*server_creations, loop=loop)
    )
    print("======== Running on {} ========\n"
          "(Press CTRL+C to quit)".format(', '.join(uris)))
    try:
        loop.run_forever()
    except KeyboardInterrupt:  # pragma: no cover
        pass
    finally:
        # Graceful teardown: stop accepting, drain, then app-level cleanup.
        server_closures = []
        for srv in servers:
            srv.close()
            server_closures.append(srv.wait_closed())
        loop.run_until_complete(asyncio.gather(*server_closures, loop=loop))
        loop.run_until_complete(app.shutdown())
        loop.run_until_complete(handler.shutdown(shutdown_timeout))
        loop.run_until_complete(app.cleanup())
    loop.close()
def main(argv):
    """CLI entry point: import ``module:function``, build the app, serve it."""
    parser = ArgumentParser(
        description="aiohttp.web Application server",
        prog="aiohttp.web"
    )
    parser.add_argument(
        "entry_func",
        metavar="entry-func",
        help=("Callable returning the `aiohttp.web.Application` instance to "
              "run. Should be specified in the 'module:function' syntax.")
    )
    parser.add_argument(
        "-H", "--hostname",
        default="localhost",
        help="TCP/IP hostname to serve on (default: %(default)r)"
    )
    parser.add_argument(
        "-P", "--port",
        type=int,
        default="8080",
        help="TCP/IP port to serve on (default: %(default)r)"
    )
    parser.add_argument(
        "-U", "--path",
        help="Unix file system path to serve on. Specifying a path will cause "
             "hostname and port arguments to be ignored.",
    )
    args, extra_argv = parser.parse_known_args(argv)

    # Resolve the "module:function" entry point.
    mod_str, _, func_str = args.entry_func.partition(":")
    if not (mod_str and func_str):
        parser.error(
            "'entry-func' not in 'module:function' syntax"
        )
    if mod_str.startswith("."):
        parser.error("relative module names not supported")
    try:
        module = import_module(mod_str)
    except ImportError as ex:
        parser.error("unable to import %s: %s" % (mod_str, ex))
    try:
        func = getattr(module, func_str)
    except AttributeError:
        parser.error("module %r has no attribute %r" % (mod_str, func_str))

    # Unix socket paths require AF_UNIX support on this platform.
    if args.path is not None and not hasattr(socket, 'AF_UNIX'):
        parser.error("file system paths not supported by your operating"
                     " environment")

    app = func(extra_argv)
    run_app(app, host=args.hostname, port=args.port, path=args.path)
    parser.exit(message="Stopped\n")
# Script entry point: delegate to main() with the CLI arguments.
if __name__ == "__main__":  # pragma: no branch
    main(sys.argv[1:])  # pragma: no cover
|
|
# -*- coding: utf-8 -*-
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
class HeaderTests(object):
    """Mixin of header-handling tests for the CSV parsers.

    Host classes must provide ``self.read_csv`` / ``self.read_table``
    plus the assertion helpers used below.
    """

    def test_read_with_bad_header(self):
        # Raw string: ``\d`` is a regex escape, not a valid string escape.
        errmsg = r"but only \d+ lines in file"
        with tm.assertRaisesRegexp(ValueError, errmsg):
            s = StringIO(',,')
            self.read_csv(s, header=[10])

    def test_bool_header_arg(self):
        # see gh-6114
        data = """\
MyColumn
a
b
a
b"""
        for arg in [True, False]:
            with tm.assertRaises(TypeError):
                self.read_csv(StringIO(data), header=arg)
            with tm.assertRaises(TypeError):
                self.read_table(StringIO(data), header=arg)

    def test_no_header_prefix(self):
        data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
        df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
                                  header=None)
        expected = [[1, 2, 3, 4, 5.],
                    [6, 7, 8, 9, 10],
                    [11, 12, 13, 14, 15]]
        tm.assert_almost_equal(df_pref.values, expected)
        self.assert_numpy_array_equal(
            df_pref.columns, ['Field0', 'Field1', 'Field2',
                              'Field3', 'Field4'])

    def test_header_with_index_col(self):
        data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
        names = ['A', 'B', 'C']
        df = self.read_csv(StringIO(data), names=names)
        self.assertEqual(names, ['A', 'B', 'C'])
        values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        expected = DataFrame(values, index=['foo', 'bar', 'baz'],
                             columns=['A', 'B', 'C'])
        tm.assert_frame_equal(df, expected)

    def test_header_not_first_line(self):
        data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
        data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
        df = self.read_csv(StringIO(data), header=2, index_col=0)
        expected = self.read_csv(StringIO(data2), header=0, index_col=0)
        tm.assert_frame_equal(df, expected)

    def test_header_multi_index(self):
        expected = tm.makeCustomDataframe(
            5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
        data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
        df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
            0, 1], tupleize_cols=False)
        tm.assert_frame_equal(df, expected)
        # skipping lines in the header
        df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
            0, 1], tupleize_cols=False)
        tm.assert_frame_equal(df, expected)
        # INVALID OPTIONS
        # no as_recarray
        self.assertRaises(ValueError, self.read_csv,
                          StringIO(data), header=[0, 1, 2, 3],
                          index_col=[0, 1], as_recarray=True,
                          tupleize_cols=False)
        # names
        self.assertRaises(ValueError, self.read_csv,
                          StringIO(data), header=[0, 1, 2, 3],
                          index_col=[0, 1], names=['foo', 'bar'],
                          tupleize_cols=False)
        # usecols
        self.assertRaises(ValueError, self.read_csv,
                          StringIO(data), header=[0, 1, 2, 3],
                          index_col=[0, 1], usecols=['foo', 'bar'],
                          tupleize_cols=False)
        # non-numeric index_col
        self.assertRaises(ValueError, self.read_csv,
                          StringIO(data), header=[0, 1, 2, 3],
                          index_col=['foo', 'bar'], tupleize_cols=False)

    def test_header_multiindex_common_format(self):
        df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
                       index=['one', 'two'],
                       columns=MultiIndex.from_tuples(
                           [('a', 'q'), ('a', 'r'), ('a', 's'),
                            ('b', 't'), ('c', 'u'), ('c', 'v')]))
        # to_csv
        data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(df, result)
        # common
        data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(df, result)
        # common, no index_col
        data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
        tm.assert_frame_equal(df.reset_index(drop=True), result)
        # malformed case 1
        expected = DataFrame(np.array(
            [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
            index=Index([1, 7]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('r'), u('s'), u('t'),
                                        u('u'), u('v')]],
                               labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
                               names=[u('a'), u('q')]))
        data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(expected, result)
        # malformed case 2
        expected = DataFrame(np.array(
            [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
            index=Index([1, 7]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('r'), u('s'), u('t'),
                                        u('u'), u('v')]],
                               labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
                               names=[None, u('q')]))
        data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(expected, result)
        # mi on columns and index (malformed)
        expected = DataFrame(np.array(
            [[3, 4, 5, 6], [9, 10, 11, 12]], dtype='int64'),
            index=MultiIndex(levels=[[1, 7], [2, 8]],
                             labels=[[0, 1], [0, 1]]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('s'), u('t'), u('u'), u('v')]],
                               labels=[[0, 1, 2, 2], [0, 1, 2, 3]],
                               names=[None, u('q')]))
        data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
        tm.assert_frame_equal(expected, result)

    def test_header_names_backward_compat(self):
        # #2539
        data = '1,2,3\n4,5,6'
        result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
        expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
                                 header=None)
        tm.assert_frame_equal(result, expected)
        data2 = 'foo,bar,baz\n' + data
        result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
                               header=0)
        tm.assert_frame_equal(result, expected)

    def test_read_only_header_no_rows(self):
        # See gh-7773
        expected = DataFrame(columns=['a', 'b', 'c'])
        df = self.read_csv(StringIO('a,b,c'))
        tm.assert_frame_equal(df, expected)
        df = self.read_csv(StringIO('a,b,c'), index_col=False)
        tm.assert_frame_equal(df, expected)

    def test_no_header(self):
        data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
        df = self.read_table(StringIO(data), sep=',', header=None)
        df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
                                  header=None)
        names = ['foo', 'bar', 'baz', 'quux', 'panda']
        df2 = self.read_table(StringIO(data), sep=',', names=names)
        expected = [[1, 2, 3, 4, 5.],
                    [6, 7, 8, 9, 10],
                    [11, 12, 13, 14, 15]]
        tm.assert_almost_equal(df.values, expected)
        tm.assert_almost_equal(df.values, df2.values)
        self.assert_numpy_array_equal(df_pref.columns,
                                      ['X0', 'X1', 'X2', 'X3', 'X4'])
        self.assert_numpy_array_equal(df.columns, lrange(5))
        self.assert_numpy_array_equal(df2.columns, names)
|
|
# -*- coding: utf-8 -*-
"""
Pdb debugger class.
Modified from the standard pdb.Pdb class to avoid including readline, so that
the command line completion of other programs which include this isn't
damaged.
In the future, this class will be expanded with improvements over the standard
pdb.
The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
changes. Licensing should therefore be under the standard Python terms. For
details on the PSF (Python Software Foundation) standard license, see:
https://docs.python.org/2/license.html
"""
#*****************************************************************************
#
# This file is licensed under the PSF license.
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
#
#
#*****************************************************************************
import bdb
import functools
import inspect
import linecache
import sys
import warnings
from IPython import get_ipython
from IPython.utils import PyColorize
from IPython.utils import coloransi, py3compat
from IPython.core.excolors import exception_colors
from IPython.testing.skipdoctest import skip_doctest
# Prompt shown by the IPython debugger (instead of pdb's "(Pdb) ").
prompt = 'ipdb> '
# We have to check this directly from sys.argv, config struct not yet available
from pdb import Pdb as OldPdb
# Allow the set_trace code to operate outside of an ipython instance, even if
# it does so with some limitations. The rest of this support is implemented in
# the Tracer constructor.
def make_arrow(pad):
    """Return the leading arrow ('---> ') used by tracebacks/the debugger,
    sized to occupy exactly *pad* characters (empty for pad < 1)."""
    if pad < 1:
        return ''
    if pad == 1:
        return '>'
    return '-' * (pad - 2) + '> '
def BdbQuit_excepthook(et, ev, tb, excepthook=None):
    """Exception hook which handles `BdbQuit` exceptions.

    All other exceptions are processed using the `excepthook` parameter.
    """
    warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
                  DeprecationWarning, stacklevel=2)
    if et == bdb.BdbQuit:
        print('Exiting Debugger.')
        return
    if excepthook is not None:
        excepthook(et, ev, tb)
        return
    # Backwards compatibility. Raise deprecation warning?
    BdbQuit_excepthook.excepthook_ori(et, ev, tb)
def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
    """Deprecated IPython custom exception handler for `BdbQuit`."""
    msg = "`BdbQuit_IPython_excepthook` is deprecated since version 5.1"
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    print('Exiting Debugger.')
class Tracer(object):
    """
    DEPRECATED
    Class for local debugging, similar to pdb.set_trace.
    Instances of this class, when called, behave like pdb.set_trace, but
    providing IPython's enhanced capabilities.
    This is implemented as a class which must be initialized in your own code
    and not as a standalone function because we need to detect at runtime
    whether IPython is already active or not. That detection is done in the
    constructor, ensuring that this code plays nicely with a running IPython,
    while functioning acceptably (though with limitations) if outside of it.
    """
    @skip_doctest
    def __init__(self, colors=None):
        """
        DEPRECATED
        Create a local debugger instance.
        Parameters
        ----------
        colors : str, optional
            The name of the color scheme to use, it must be one of IPython's
            valid color schemes. If not given, the function will default to
            the current IPython scheme when running inside IPython, and to
            'NoColor' otherwise.
        Examples
        --------
        ::
            from IPython.core.debugger import Tracer; debug_here = Tracer()
        Later in your code::
            debug_here() # -> will open up the debugger at that point.
        Once the debugger activates, you can use all of its regular commands to
        step through code, set breakpoints, etc. See the pdb documentation
        from the Python standard library for usage details.
        """
        warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
                      "`IPython.core.debugger.Pdb.set_trace()`",
                      DeprecationWarning, stacklevel=2)
        ip = get_ipython()
        if ip is None:
            # Outside of ipython, we set our own exception hook manually
            sys.excepthook = functools.partial(BdbQuit_excepthook,
                                               excepthook=sys.excepthook)
            def_colors = 'NoColor'
        else:
            # In ipython, we use its custom exception handler mechanism
            def_colors = ip.colors
            ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
        if colors is None:
            colors = def_colors
        # The stdlib debugger internally uses a modified repr from the `repr`
        # module, that limits the length of printed strings to a hardcoded
        # limit of 30 characters. That much trimming is too aggressive, let's
        # at least raise that limit to 80 chars, which should be enough for
        # most interactive uses.
        try:
            try:
                from reprlib import aRepr  # Py 3
            except ImportError:
                from repr import aRepr  # Py 2
            aRepr.maxstring = 80
        except:
            # NOTE: bare except is deliberate best-effort here.
            # This is only a user-facing convenience, so any error we encounter
            # here can be warned about but can be otherwise ignored. These
            # printouts will tell us about problems if this API changes
            import traceback
            traceback.print_exc()
        # The actual debugger that __call__ delegates to.
        self.debugger = Pdb(colors)
    def __call__(self):
        """Starts an interactive debugger at the point where called.
        This is similar to the pdb.set_trace() function from the std lib, but
        using IPython's enhanced debugger."""
        # f_back: start debugging in the CALLER's frame, not this method.
        self.debugger.set_trace(sys._getframe().f_back)
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
    """Wrap *new_fn* so the wrapper carries *old_fn*'s docstring
    (plus *additional_text*). Useful for ``do_...`` commands that hook
    into the help system. Adapted from a comp.lang.python posting
    by Duncan Booth."""
    def wrapper(*args, **kw):
        return new_fn(*args, **kw)
    doc = old_fn.__doc__
    if doc:
        wrapper.__doc__ = doc + additional_text
    return wrapper
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
class Pdb(OldPdb):
    """Modified Pdb class, does not load readline.

    For a standalone version that uses prompt_toolkit, see
    `IPython.terminal.debugger.TerminalPdb` and
    `IPython.terminal.debugger.set_trace()`
    """
    def __init__(self, color_scheme=None, completekey=None,
                 stdin=None, stdout=None, context=5):
        """Create an IPython-flavoured Pdb.

        Parameters
        ----------
        color_scheme : str, optional
            Deprecated; the shell's color setting is used instead.
        completekey, stdin, stdout :
            Passed straight through to ``pdb.Pdb``.
        context : int, optional
            Number of source lines of context; must be a positive integer.
        """
        # Parent constructor:
        try:
            self.context = int(context)
            if self.context <= 0:
                raise ValueError("Context must be a positive integer")
        except (TypeError, ValueError):
            raise ValueError("Context must be a positive integer")
        OldPdb.__init__(self, completekey, stdin, stdout)
        # IPython changes...
        self.shell = get_ipython()
        if self.shell is None:
            save_main = sys.modules['__main__']
            # No IPython instance running, we must create one
            from IPython.terminal.interactiveshell import \
                TerminalInteractiveShell
            self.shell = TerminalInteractiveShell.instance()
            # needed by any code which calls __import__("__main__") after
            # the debugger was entered. See also #9941.
            sys.modules['__main__'] = save_main
        if color_scheme is not None:
            warnings.warn(
                "The `color_scheme` argument is deprecated since version 5.1",
                DeprecationWarning, stacklevel=2)
        else:
            color_scheme = self.shell.colors
        self.aliases = {}
        # Create color table: we copy the default one from the traceback
        # module and add a few attributes needed for debugging
        self.color_scheme_table = exception_colors()
        # shorthands
        C = coloransi.TermColors
        cst = self.color_scheme_table
        cst['NoColor'].colors.prompt = C.NoColor
        cst['NoColor'].colors.breakpoint_enabled = C.NoColor
        cst['NoColor'].colors.breakpoint_disabled = C.NoColor
        cst['Linux'].colors.prompt = C.Green
        cst['Linux'].colors.breakpoint_enabled = C.LightRed
        cst['Linux'].colors.breakpoint_disabled = C.Red
        cst['LightBG'].colors.prompt = C.Blue
        cst['LightBG'].colors.breakpoint_enabled = C.LightRed
        cst['LightBG'].colors.breakpoint_disabled = C.Red
        cst['Neutral'].colors.prompt = C.Blue
        cst['Neutral'].colors.breakpoint_enabled = C.LightRed
        cst['Neutral'].colors.breakpoint_disabled = C.Red
        # Add a python parser so we can syntax highlight source while
        # debugging.
        self.parser = PyColorize.Parser(style=color_scheme)
        self.set_colors(color_scheme)
        # Set the prompt - the default prompt is '(Pdb)'
        self.prompt = prompt
    def set_colors(self, scheme):
        """Shorthand access to the color table scheme selector method."""
        self.color_scheme_table.set_active_scheme(scheme)
        # Keep the syntax-highlighting parser in sync with the color table.
        self.parser.style = scheme
    def interaction(self, frame, traceback):
        """Start the interactive debugger prompt for *frame*.

        A Ctrl-C at the prompt aborts the interaction and prints only the
        exception summary instead of a full traceback.
        """
        try:
            OldPdb.interaction(self, frame, traceback)
        except KeyboardInterrupt:
            sys.stdout.write('\n' + self.shell.get_exception_only())
    def new_do_up(self, arg):
        # Delegate to pdb's `up`; no docstring here on purpose -- the doc is
        # copied from OldPdb.do_up by decorate_fn_with_doc below.
        OldPdb.do_up(self, arg)
    do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
    def new_do_down(self, arg):
        # Delegate to pdb's `down`; doc is attached by decorate_fn_with_doc.
        OldPdb.do_down(self, arg)
    do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
    def new_do_frame(self, arg):
        # Same behaviour as pdb's `frame` command.
        OldPdb.do_frame(self, arg)
    def new_do_quit(self, arg):
        # Restore the completer state stashed when the debugger was entered
        # (if any) before handing off to pdb's quit.
        if hasattr(self, 'old_all_completions'):
            self.shell.Completer.all_completions=self.old_all_completions
        return OldPdb.do_quit(self, arg)
    do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
    def new_do_restart(self, arg):
        """Restart command. In the context of ipython this is exactly the same
        thing as 'quit'."""
        # There is no program to re-exec inside an embedded debugger.
        self.msg("Restart doesn't make sense here. Using 'quit' instead.")
        return self.do_quit(arg)
def print_stack_trace(self, context=None):
if context is None:
context = self.context
try:
context=int(context)
if context <= 0:
raise ValueError("Context must be a positive integer")
except (TypeError, ValueError):
raise ValueError("Context must be a positive integer")
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno, context=context)
except KeyboardInterrupt:
pass
def print_stack_entry(self,frame_lineno, prompt_prefix='\n-> ',
context=None):
if context is None:
context = self.context
try:
context=int(context)
if context <= 0:
raise ValueError("Context must be a positive integer")
except (TypeError, ValueError):
raise ValueError("Context must be a positive integer")
print(self.format_stack_entry(frame_lineno, '', context))
# vds: >>
frame, lineno = frame_lineno
filename = frame.f_code.co_filename
self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
# vds: <<
    def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
        """Return a colorized, multi-line description of one stack entry.

        *frame_lineno* is a ``(frame, lineno)`` pair; *context* is the
        number of source lines shown around *lineno*.
        """
        if context is None:
            context = self.context
        try:
            context=int(context)
            if context <= 0:
                # NOTE(review): unlike print_stack_trace/print_stack_entry,
                # this method only prints the complaint and continues with
                # the bad value instead of raising -- confirm if intentional.
                print("Context must be a positive integer")
        except (TypeError, ValueError):
            print("Context must be a positive integer")
        try:
            import reprlib  # Py 3
        except ImportError:
            import repr as reprlib  # Py 2
        ret = []
        Colors = self.color_scheme_table.active_colors
        ColorsNormal = Colors.Normal
        # %-templates for the file link, call signature and source lines.
        tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
        tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
        tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
        tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
                                             ColorsNormal)
        frame, lineno = frame_lineno
        return_value = ''
        if '__return__' in frame.f_locals:
            # pdb stores the pending return value in the frame locals.
            rv = frame.f_locals['__return__']
            #return_value += '->'
            return_value += reprlib.repr(rv) + '\n'
        ret.append(return_value)
        #s = filename + '(' + `lineno` + ')'
        filename = self.canonic(frame.f_code.co_filename)
        link = tpl_link % py3compat.cast_unicode(filename)
        if frame.f_code.co_name:
            func = frame.f_code.co_name
        else:
            func = "<lambda>"
        call = ''
        if func != '?':
            # Reconstruct an approximate call signature when pdb recorded
            # the arguments in the frame locals.
            if '__args__' in frame.f_locals:
                args = reprlib.repr(frame.f_locals['__args__'])
            else:
                args = '()'
            call = tpl_call % (func, args)
        # The level info should be generated in the same format pdb uses, to
        # avoid breaking the pdbtrack functionality of python-mode in *emacs.
        if frame is self.curframe:
            ret.append('> ')
        else:
            ret.append('  ')
        ret.append(u'%s(%s)%s\n' % (link,lineno,call))
        # Window of `context` lines around `lineno`, clamped to the file.
        start = lineno - 1 - context//2
        lines = linecache.getlines(filename)
        start = min(start, len(lines) - context)
        start = max(start, 0)
        lines = lines[start : start + context]
        for i,line in enumerate(lines):
            show_arrow = (start + 1 + i == lineno)
            # Emphasised template for the current frame or the arrow line.
            linetpl = (frame is self.curframe or show_arrow) \
                      and tpl_line_em \
                      or tpl_line
            ret.append(self.__format_line(linetpl, filename,
                                          start + 1 + i, line,
                                          arrow = show_arrow) )
        return ''.join(ret)
    def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
        """Return one syntax-highlighted source line, with an optional
        breakpoint marker and current-line arrow."""
        bp_mark = ""
        bp_mark_color = ""
        # Syntax-highlight the line; if the parser fails keep the raw text.
        new_line, err = self.parser.format2(line, 'str')
        if not err:
            line = new_line
        bp = None
        if lineno in self.get_file_breaks(filename):
            # Several breakpoints may share a line; display the last one.
            bps = self.get_breaks(filename, lineno)
            bp = bps[-1]
        if bp:
            Colors = self.color_scheme_table.active_colors
            bp_mark = str(bp.number)
            bp_mark_color = Colors.breakpoint_enabled
            if not bp.enabled:
                bp_mark_color = Colors.breakpoint_disabled
        # Total column width reserved for breakpoint mark + line number.
        numbers_width = 7
        if arrow:
            # This is the line with the error
            pad = numbers_width - len(str(lineno)) - len(bp_mark)
            num = '%s%s' % (make_arrow(pad), str(lineno))
        else:
            num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
        return tpl_line % (bp_mark_color + bp_mark, num, line)
    def print_list_lines(self, filename, first, last):
        """The printing (as opposed to the parsing) part of a 'list'
        command."""
        try:
            Colors = self.color_scheme_table.active_colors
            ColorsNormal = Colors.Normal
            tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
            tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
            src = []
            if filename == "<string>" and hasattr(self, "_exec_filename"):
                # Code compiled from a string: use the recorded file name.
                filename = self._exec_filename
            for lineno in range(first, last+1):
                line = linecache.getline(filename, lineno)
                if not line:
                    # Ran past the end of the file.
                    break
                if lineno == self.curframe.f_lineno:
                    line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
                else:
                    line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
                src.append(line)
                # Remember the last printed line so `list` can continue.
                self.lineno = lineno
            print(''.join(src))
        except KeyboardInterrupt:
            pass
    def do_list(self, arg):
        """Print lines of code from the current stack frame
        """
        self.lastcmd = 'list'
        last = None
        if arg:
            try:
                # arg may be "first,last" (a tuple) or a single line number.
                # NOTE(review): eval of user input is the historical pdb
                # idiom -- tolerable only because a debugger user can already
                # execute arbitrary code at the prompt.
                x = eval(arg, {}, {})
                if type(x) == type(()):
                    first, last = x
                    first = int(first)
                    last = int(last)
                    if last < first:
                        # Assume it's a count
                        last = first + last
                else:
                    first = max(1, int(x) - 5)
            except:
                print('*** Error in argument:', repr(arg))
                return
        elif self.lineno is None:
            # First `list` with no argument: centre on the current line.
            first = max(1, self.curframe.f_lineno - 5)
        else:
            # Repeated `list`: continue where the last one stopped.
            first = self.lineno + 1
        if last is None:
            last = first + 10
        self.print_list_lines(self.curframe.f_code.co_filename, first, last)
        # vds: >>  (keep any attached editor in sync with the listing)
        lineno = first
        filename = self.curframe.f_code.co_filename
        self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
        # vds: <<
    do_l = do_list
def getsourcelines(self, obj):
lines, lineno = inspect.findsource(obj)
if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
# must be a module frame: do not try to cut a block out of it
return lines, 1
elif inspect.ismodule(obj):
return lines, 1
return inspect.getblock(lines[lineno:]), lineno+1
    def do_longlist(self, arg):
        """Print lines of code from the current stack frame.
        Shows more lines than 'list' does.
        """
        self.lastcmd = 'longlist'
        try:
            lines, lineno = self.getsourcelines(self.curframe)
        except OSError as err:
            # Source not available (e.g. interactive input, builtins).
            self.error(err)
            return
        last = lineno + len(lines)
        self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
    do_ll = do_longlist
def do_pdef(self, arg):
"""Print the call signature for any callable object.
The debugger interface to %pdef"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
def do_pdoc(self, arg):
"""Print the docstring for an object.
The debugger interface to %pdoc."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
def do_pfile(self, arg):
"""Print (or run through pager) the file where an object is defined.
The debugger interface to %pfile.
"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
def do_pinfo(self, arg):
"""Provide detailed information about an object.
The debugger interface to %pinfo, i.e., obj?."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
def do_pinfo2(self, arg):
"""Provide extra detailed information about an object.
The debugger interface to %pinfo2, i.e., obj??."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
def do_psource(self, arg):
"""Print (or run through pager) the source code for an object."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
def do_where(self, arg):
"""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.
Take a number as argument as an (optional) number of context line to
print"""
if arg:
context = int(arg)
self.print_stack_trace(context)
else:
self.print_stack_trace()
do_w = do_where
def set_trace(frame=None):
    """
    Start debugging from `frame`.
    If frame is not specified, debugging starts from caller's frame.
    """
    # sys._getframe().f_back is the frame of whoever called set_trace().
    Pdb().set_trace(frame or sys._getframe().f_back)
|
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import requests
import warnings
from coinbase.compat import imap
from coinbase.compat import quote
from coinbase.compat import urljoin
from coinbase.compat import urlparse
from coinbase.model import PaymentMethod
from coinbase.auth import HMACAuth
from coinbase.auth import OAuth2Auth
from coinbase.error import APIError
from coinbase.error import AuthenticationError
from coinbase.error import ExpiredAccessToken
from coinbase.error import InvalidAccessToken
from coinbase.error import TokenRefreshError
from coinbase.error import TwoFactorTokenRequired
from coinbase.error import UnexpectedDataFormatError
from coinbase.error import build_api_error
from coinbase.model import APIObject
from coinbase.model import Account
from coinbase.model import User
from coinbase.util import encode_params
class Client(object):
    """ API Client for the Coinbase API.
    Entry point for making requests to the Coinbase API. Provides helper methods
    for common API endpoints, as well as niceties around response verification
    and formatting.
    Any errors will be raised as exceptions. These exceptions will always be
    subclasses of `coinbase.error.APIError`. HTTP-related errors will also be
    subclasses of `requests.HTTPError`.
    Full API docs, including descriptions of each API and its parameters, are
    available here: https://developers.coinbase.com/api
    """
    # Whether TLS certificates are verified on every request.
    VERIFY_SSL = True
    # Production API root; instances may override via `base_api_uri`.
    BASE_API_URI = 'https://api.coinbase.com/v1/'
    # Attribute name under which model objects hold their owning client.
    _model_key = '__api_client'
    def __init__(self, api_key, api_secret, base_api_uri=None):
        """Create a client using HMAC key/secret authentication.

        Raises ValueError if either credential is missing.
        """
        if not api_key:
            raise ValueError('Missing `api_key`.')
        if not api_secret:
            raise ValueError('Missing `api_secret`.')
        # Allow passing in a different API base.
        self._set_base_api_uri(base_api_uri or self.BASE_API_URI)
        # Set up a requests session for interacting with the API.
        self.session = self._build_session(HMACAuth, api_key, api_secret)
    def _build_session(self, auth_class, *args, **kwargs):
        """Internal helper for creating a requests `session` with the correct
        authentication handling."""
        session = requests.session()
        session.auth = auth_class(*args, **kwargs)
        # Every endpoint speaks JSON; identify ourselves with a stable UA.
        session.headers.update({'Accept': 'application/json',
                                'Content-Type': 'application/json',
                                'User-Agent': 'coinbase/python/1.0'})
        return session
    def _set_base_api_uri(self, base_api_uri):
        """Internal helper for setting a new base API URL. Warns if the URL is
        insecure."""
        self.BASE_API_URI = base_api_uri
        if urlparse(self.BASE_API_URI).scheme != 'https':
            warning_message = (
                'WARNING: this client is sending a request to an insecure'
                ' API endpoint. Any API request you make may expose your API key and'
                ' secret to third parties. Consider using the default endpoint:\n\n'
                '  %s\n') % Client.BASE_API_URI
            warnings.warn(warning_message, UserWarning)
    def _create_api_uri(self, *parts):
        """Internal helper for creating fully qualified endpoint URIs."""
        # Each path component is URL-quoted before joining.
        return urljoin(self.BASE_API_URI, '/'.join(imap(quote, parts)))
    def _request(self, method, *relative_path_parts, **kwargs):
        """Internal helper for creating HTTP requests to the Coinbase API.
        Raises an APIError if the response is not 200. Otherwise, returns the
        response object. Not intended for direct use by API consumers.
        """
        uri = self._create_api_uri(*relative_path_parts)
        kwargs.update({'verify': self.VERIFY_SSL})
        response = getattr(self.session, method)(uri, **kwargs)
        return self._handle_response(response)
    def _handle_response(self, response):
        """Internal helper for handling API responses from the Coinbase server.
        Raises the appropriate exceptions when necessary; otherwise, returns the
        response.
        """
        if response.status_code == 200:
            return response
        # If the API response was not 200, an error occurred. Raise an exception
        # with the details of the error and references to the full response and,
        # when possible, request. These exceptions are intended to bubble up to the
        # consuming user. If the error is authentication related, raise a more
        # specific exception.
        if response.status_code == 401:
            raise build_api_error(AuthenticationError, response)
        raise build_api_error(APIError, response)
    # Thin verb-specific wrappers around `_request`.
    def _get(self, *args, **kwargs):
        return self._request('get', *args, **kwargs)
    def _post(self, *args, **kwargs):
        return self._request('post', *args, **kwargs)
    def _put(self, *args, **kwargs):
        return self._request('put', *args, **kwargs)
    def _delete(self, *args, **kwargs):
        return self._request('delete', *args, **kwargs)
    def _make_api_object(self, *args, **kwargs):
        # Load the payload into a supplied account object when given,
        # otherwise into a fresh APIObject bound to this client.
        root_api_object = kwargs.get('account', None) or APIObject(api_client=self)
        return root_api_object.load(*args, **kwargs)
    def get_authorization(self, **kwargs):
        """https://developers.coinbase.com/api#authorization"""
        response = self._get('authorization', **kwargs)
        return self._make_api_object(response.json())
    def get_accounts(self, page=None, limit=None, all_accounts=None):
        """https://developers.coinbase.com/api#list-accounts"""
        data = encode_params({
            'page': page,
            'limit': limit,
            'all_accounts': all_accounts,
        }, bools_to_ints=True)
        response = self._get('accounts', data=data)
        return self._make_api_object(value=response.json(), paged_key='accounts')
    def get_account(self, account_id=None):
        """https://developers.coinbase.com/api#show-an-account
        If the `account_id` parameter is omitted, this method will fetch details
        on the primary account.
        """
        if account_id is None:
            account_id = 'primary'
        response = self._get('accounts', account_id)
        api_obj = self._make_api_object(response.json())
        account = api_obj.get('account', None)
        # Guard against responses that do not deserialize into an Account.
        if not isinstance(account, Account):
            raise build_api_error(
                UnexpectedDataFormatError,
                response,
                'Could not parse API response')
        return account
    def create_account(self, name):
        """https://developers.coinbase.com/api#create-an-account"""
        data = encode_params({
            'account': {
                'name': name,
            },
        })
        response = self._post('accounts', data=data)
        api_obj = self._make_api_object(response.json())
        # The API signals failure via a `success` flag even on HTTP 200.
        if not api_obj.get('success', False):
            raise build_api_error(APIError, response, 'Failed to create an account')
        account = api_obj.get('account', None)
        if not isinstance(account, Account):
            raise build_api_error(
                UnexpectedDataFormatError,
                response,
                'Could not parse API response')
        return account
    def redeem_token(self, token_id):
        # Redeem a merchant token; returns True when the API reports success.
        data = encode_params({
            'token_id': token_id})
        response = self._post('tokens/redeem', data=data)
        api_obj = self._make_api_object(response.json())
        return api_obj.get('success', False)
    def get_contacts(self, page=None, limit=None, all_accounts=None):
        """https://developers.coinbase.com/api#contacts"""
        data = encode_params({
            'page': page,
            'limit': limit,
            'all_accounts': all_accounts,
        })
        response = self._get('contacts', data=data)
        return self._make_api_object(response.json(), paged_key='contacts')
    def get_current_user(self):
        """https://developers.coinbase.com/api#get-current-user"""
        response = self._get('users', 'self')
        api_obj = self._make_api_object(response.json())
        user = api_obj.get('user', None)
        if not isinstance(user, User):
            raise build_api_error(
                UnexpectedDataFormatError,
                response,
                'Could not parse API response')
        return user
    def get_buy_price(self, qty=None, currency=None):
        """https://developers.coinbase.com/api#get-the-buy-price-for-bitcoin"""
        data = encode_params({
            'qty': qty,
            'currency': currency,
        })
        response = self._get('prices','buy', data=data)
        return self._make_api_object(response.json())
    def get_sell_price(self, qty=None, currency=None):
        """https://developers.coinbase.com/api#get-the-sell-price"""
        data = encode_params({
            'qty': qty,
            'currency': currency,
        })
        response = self._get('prices','sell', data=data)
        return self._make_api_object(response.json())
    def get_spot_price(self, currency=None):
        """https://developers.coinbase.com/api#get-the-spot-price-of-bitcoin"""
        data = encode_params({
            'currency': currency,
        })
        response = self._get('prices','spot_rate', data=data)
        return self._make_api_object(response.json())
    def get_supported_currencies(self):
        """https://developers.coinbase.com/api#currencies"""
        response = self._get('currencies')
        return self._make_api_object(response.json())
    def get_exchange_rates(self):
        """https://developers.coinbase.com/api#list-exchange-rates-between-btc-and-other-currencies"""
        response = self._get('currencies', 'exchange_rates')
        return self._make_api_object(response.json())
    def create_user(self,
                    email, password, referrer_id=None, client_id=None, scopes=None):
        """https://developers.coinbase.com/api#create-a-new-user"""
        data = encode_params({
            'user': {
                'email': email,
                'password': password,
                'referrer_id': referrer_id,
                'client_id': client_id,
                # Scopes may be given as a sequence; the API expects a
                # single space-separated string.
                'scopes': (
                    ' '.join(scopes) if isinstance(scopes, (list, tuple)) else scopes),
            },
        })
        response = self._post('users', data=data)
        api_obj = self._make_api_object(response.json())
        if not api_obj.get('success', False):
            raise build_api_error(APIError, response, 'Failed to create a user')
        return api_obj
    def get_payment_methods(self):
        """https://developers.coinbase.com/api#payment-methods"""
        response = self._get('payment_methods')
        return self._make_api_object(response.json(), paged_key='payment_methods')
    def get_payment_method(self, payment_method_id):
        """https://developers.coinbase.com/api#show-a-payment-method"""
        response = self._get('payment_methods', payment_method_id)
        api_obj = self._make_api_object(response.json())
        payment_method = api_obj.get('payment_method', None)
        if not isinstance(payment_method, PaymentMethod):
            raise build_api_error(
                UnexpectedDataFormatError,
                response,
                'Could not parse API response')
        return payment_method
class OAuthClient(Client):
    """Coinbase API client that authenticates with OAuth2 bearer tokens.

    Adds token-specific error handling on 401/402 responses and can
    refresh its access/refresh token pair via `refresh()`.
    """
    # OAuth2 endpoint used by `refresh` to exchange the refresh token.
    TOKEN_ENDPOINT_URI = 'https://www.coinbase.com/oauth/token'
    def __init__(self,
                 client_id,
                 client_secret,
                 access_token,
                 refresh_token,
                 token_endpoint_uri=None,
                 base_api_uri=None):
        """Create an OAuth2 client; all four credentials are required.

        Raises ValueError when any credential is missing.
        """
        if not access_token:
            raise ValueError("Missing `access_token`.")
        if not refresh_token:
            raise ValueError("Missing `refresh_token`.")
        if not client_id:
            raise ValueError("Missing `client_id`.")
        if not client_secret:
            raise ValueError("Missing `client_secret`.")
        self.client_id = client_id
        self.client_secret = client_secret
        self.access_token = access_token
        self.refresh_token = refresh_token
        self._set_base_api_uri(base_api_uri or self.BASE_API_URI)
        self.TOKEN_ENDPOINT_URI = token_endpoint_uri or self.TOKEN_ENDPOINT_URI
        # Set up a requests session for interacting with the API.
        # The lambda defers the token lookup so a refreshed access token is
        # picked up automatically on subsequent requests.
        self.session = self._build_session(OAuth2Auth, lambda: self.access_token)
    def _handle_response(self, response):
        # 402 will only be returned if the API endpoint requires that the oauth
        # client include the user's 2FA token as a parameter on the request.
        if response.status_code == 402:
            raise build_api_error(TwoFactorTokenRequired, response)
        # Non-authentication errors should be handled by the standard Client
        # response processing logic.
        if response.status_code != 401:
            return super(OAuthClient, self)._handle_response(response)
        # 401: distinguish expired vs. invalid tokens from generic failures.
        error_details = _parse_authentication_error(response)
        if error_details and error_details.get('id') == 'invalid_token':
            if 'expired' in error_details.get('error', ''):
                raise build_api_error(ExpiredAccessToken, response)
            raise build_api_error(InvalidAccessToken, response)
        raise build_api_error(AuthenticationError, response)
    def refresh(self):
        """Attempt to refresh the current access token / refresh token pair.
        If successful, the relevant attributes of this client will be updated
        automatically and the dict of token values and information given by the
        Coinbase OAuth server will be returned to the caller.
        If unsuccessful, raises a TokenRefreshError.
        """
        params = {
            'grant_type': 'refresh_token',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': self.refresh_token
        }
        response = self.session.post(
            self.TOKEN_ENDPOINT_URI, params=params, verify=self.VERIFY_SSL)
        if not response.status_code == 200:
            raise build_api_error(TokenRefreshError, response)
        data = response.json()
        # Store the new pair so later requests use the fresh token.
        self.access_token = data.get('access_token')
        self.refresh_token = data.get('refresh_token')
        return data
def _parse_authentication_error(response):
try:
return response.json()
except ValueError:
pass
auth_header = response.headers.get('www-authenticate', None)
print('auth header:', repr(auth_header))
if auth_header:
header_data = dict(re.findall('([a-zA-Z\_]+)\=\"(.*?)\"', auth_header))
id_ = header_data.get('error')
err_ = header_data.get('error_description')
print(header_data)
print(id_, err_)
if not (id_ and err_):
return None
return {'id': id_, 'error': err_}
return None
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 9 12:52:31 2015
@author: fred
"""
import os
import glob
import numpy as np
import pysac.yt as sacyt
import pysac.mhs_atmosphere as atm
# Running serially; no MPI decomposition of the domain.
l_mpi=False
# Unit scalings and physical constants shared by all models.
scales, physical_constants = \
    atm.units_const.get_parameters()
#define the models required
papers = ['paper1','paper2a','paper2b','paper2c','paper2d','mfe_setup']
#papers = ['mfe_setup']
#papers = ['paper1']
# 1D axis/edge profiles accumulated across models for the summary plots.
oneD_arrays = {}
oned_dataset = []
#loop over all four models
for paper in papers:
    # Input data and output figure directories for this model.
    datadir = os.path.expanduser('~/Documents/mhs_atmosphere/'+
                                 paper+'/')
    figsdir = os.path.expanduser('~/Documents/mhs_atmosphere/figs/'+paper+'/')
    if not os.path.exists(figsdir):
        os.makedirs(figsdir)
    #open all gdf files in the model directory
    files = glob.glob(datadir+'/*')
#    files = glob.glob(datadir+'/'+papers[0]+'_3Daux.gdf')
#    files = glob.glob(datadir+'/'+papers[0]+'.gdf')
    files.sort()
    print(files)
    for file_ in files:
        ds = sacyt.SACGDFDataset(file_)
#        fkeys = ['thermal_pressure','plasma_beta',
        vars_ = ds.index.field_list
        for var_ in vars_:
            var_field = var_[1]
            # Dimensionless maximum; used to skip identically-zero fields.
            max_var = np.max(np.abs(ds.index.grids[0][var_field]))/\
                ds.index.grids[0][var_field].unit_quantity
            var = ds.index.grids[0][var_field]
            if max_var > 0.:
                # save 1D slices from each variable for plotting
                oneD_arrays = atm.mhs_plot.make_1d_slices(ds, var_field, oneD_arrays)
                # select the central slice to plot normal to the y-plane
                # NOTE(review): under Python 3 this is true division, so N_2
                # is a float -- confirm downstream accepts that, or use //2.
                plane, N_2 = 'y', ds.domain_dimensions[1]/2
#                lines, contours = True, True
                if '1D' in file_:
                    lines, contours = False, False
                else:
                    # Overlays depend on the field type: hydrostatic-balance
                    # fields get neither, force terms get lines only.
                    if '_HS' in var_field:
                        lines, contours = False, False
                    elif 'tension' in var_field or 'balancing' in var_field:
                        lines, contours = True, False
                    else:
                        lines, contours = True, True
                # Per-model aspect ratio and streamline density.
                if '2c' in file_ or '2d' in file_:
                    aspect = 2.5
                    line_density = 0.7
                elif '2a' in file_:
                    aspect = 0.7
                    line_density = 0.9
                elif 'mfe' in file_:
                    aspect = 1.75
                    line_density = 1.9
                else:
                    aspect = 0.5
                    line_density = 1.1
                # uncomment below to plot figures comparable to mfe
#                aspect = 1.75
#                line_density = 1.9
                # save 2D plot in model's figures directory
                figname = figsdir+paper+'_'+var_field+'.eps'
                atm.mhs_plot.make_2d_plot(ds, var_field, figname,
                                          normal=[plane,N_2],
                                          aspect=aspect, lines=lines,
                                          contours=contours,
                                          model=paper, figxz=[5.5,5.6],
                                          line_density=line_density
                                          )
        # Also slice/plot the derived thermodynamic fields when available.
        if ('gas','density') in ds.derived_field_list:
            var_field = 'density'
            oneD_arrays = atm.mhs_plot.make_1d_slices(ds, var_field, oneD_arrays)
        if ('gas','thermal_pressure') in ds.derived_field_list:
            var_field = 'thermal_pressure'
            oneD_arrays = atm.mhs_plot.make_1d_slices(ds, var_field, oneD_arrays)
            figname = figsdir+paper+'_'+var_field+'.eps'
            lines, contours = True, True
            atm.mhs_plot.make_2d_plot(ds, var_field, figname,
                                      normal=[plane,N_2],
                                      aspect=aspect, lines=lines,
                                      contours=contours,
                                      model=paper, figxz=[5.5,5.6],
                                      line_density=line_density
                                      )
        if ('gas','mag_pressure') in ds.derived_field_list:
            var_field = 'mag_pressure'
            oneD_arrays = atm.mhs_plot.make_1d_slices(ds, var_field, oneD_arrays)
            figname = figsdir+paper+'_'+var_field+'.eps'
            lines, contours = True, True
            atm.mhs_plot.make_2d_plot(ds, var_field, figname,
                                      normal=[plane,N_2],
                                      aspect=aspect, lines=lines,
                                      contours=contours,
                                      model=paper, figxz=[5.5,5.6],
                                      line_density=line_density
                                      )
    plot_label = figsdir+paper+'_axis.eps'
#    keys = ['alfven_speed','sound_speed','mag_field_z_bg']
    # Shared y-range covering density, temperature and pressure on both the
    # flux-tube axis and the domain edge, with a half/double margin.
    ymin = min(oneD_arrays['density']['axis'].in_units('kg / km**3').value.min(),
               oneD_arrays['temperature']['edge'].value.min(),
               oneD_arrays['temperature']['axis'].value.min(),
               oneD_arrays['density']['edge'].in_units('kg / km**3').value.min(),
               oneD_arrays['thermal_pressure']['edge'].value.min(),
               oneD_arrays['thermal_pressure']['axis'].value.min())*0.5
    ymax = max(oneD_arrays['density']['axis'].in_units('kg / km**3').value.max(),
               oneD_arrays['temperature']['edge'].value.max(),
               oneD_arrays['temperature']['axis'].value.max(),
               oneD_arrays['density']['edge'].in_units('kg / km**3').value.max(),
               oneD_arrays['thermal_pressure']['edge'].value.max(),
               oneD_arrays['thermal_pressure']['axis'].value.max())*2.
    if 'mfe_setup' in paper:
        loc_legend='lower left'
    else:
        loc_legend='center right'
    # On-axis thermodynamic profiles.
    keys = ['thermal_pressure','mag_pressure','density','temperature']
    subkeys = ['axis']
    atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                               ylog = True, xlog = False, empirical=True,
                               loc_legend=loc_legend, ylim = (0.9*ymin,5*ymax)
                               )
    # Same profiles at the domain edge.
    plot_label = figsdir+paper+'_edge.eps'
    keys = ['thermal_pressure','mag_pressure','density','temperature']
    subkeys = ['edge']
    atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                               ylog = True, xlog = False, empirical=True,
                               loc_legend=loc_legend, ylim = (0.9*ymin,5*ymax)
                               )
    # Characteristic-speed (Alfven / sound) profiles.
    plot_label = figsdir+paper+'_speeds.eps'
    keys = ['alfven_speed','sound_speed']
    subkeys = ['mean','min','max']
    if 'mfe_setup' in paper:
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='lower left',
                                   ylim = (2e3,oneD_arrays['sound_speed']['max'].value.max())
                                   )
    else:
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='lower right'
                                   )
    plot_label = figsdir+paper+'_meanz.eps'
    keys = ['thermal_pressure','mag_pressure','density']
    subkeys = ['mean','min','max']
    if 'mfe_setup' in paper:
        ymax = oneD_arrays['density']['max'].in_units('kg / km**3').value.max()
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='upper right',
                                   ylim = (1e-2,1.1*ymax)
                                   )
    else:
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='upper right'
                                   )
    plot_label = figsdir+paper+'_beta.eps'
    keys = ['plasma_beta','mag_pressure','thermal_pressure']
    subkeys = ['mean','min','max']
    if 'mfe_setup' in paper:
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='upper right',
                                   ylim = (1e-3,1.1*oneD_arrays['plasma_beta']['max'].max())
                                   )
    else:
        atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
                                   ylog = True, xlog = False, loc_legend='upper right'
                                   )
    # Compare the on-axis vertical field against the published MFE profile.
    plot_label = figsdir+paper+'_compare.eps'
    mfe_Bz = np.load('plots/mfe_Bz_2015.npy')
    mfe_Z = np.load('plots/mfe_zz.npy')
    # NOTE(review): imported here, inside the loop, in the original script;
    # kept as-is (harmless, Python caches the module).
    import matplotlib.pyplot as plt
    plt.figure(figsize=[6.47,4.0])
    if 'mfe_setup' in paper:
        plt.plot(mfe_Z, mfe_Bz,
                 'b-', label=r"MFE '15 $B_z$(axis)"
                 )
        plt.plot(oneD_arrays['mag_field_z_bg']['Z'],
                 oneD_arrays['mag_field_z_bg']['axis'], 'm-.',
                 label=r"GFE '15 $B_z$(axis)", lw=2.0
                 )
        plt.gca().set_yscale('log',subsy=[5,10])
        plt.xlabel('Height [Mm]')
        plt.ylabel(r'$B_z$ [T]')
        plt.legend(loc='lower left')
        plt.ylim(4e-4,1.25e-1)
        plt.subplots_adjust(bottom=0.125)
        plt.savefig(plot_label)
    else:
        plt.plot(mfe_Z, mfe_Bz,
                 'b-', label=r"MFE '15 $B_z$(axis)"
                 )
        plt.plot(oneD_arrays['mag_field_z_bg']['Z'],
                 oneD_arrays['mag_field_z_bg']['axis'], 'm-.',
                 label=r"GFE '15 $B_z$(axis)", lw=2.0
                 )
        plt.gca().set_yscale('log',subsy=[5,10])
        plt.xlabel('Height [Mm]')
        plt.ylabel(r'$B_z$ [T]')
        plt.legend(loc='lower left')
        # Lower limit follows this model's minimum field strength.
        ymin = oneD_arrays['mag_field_z_bg']['axis'].min().value
        plt.ylim(0.05*ymin,1.25e-1)
        plt.subplots_adjust(bottom=0.125)
        plt.savefig(plot_label)
    if 'paper1' in paper:
        # Reproduce the empirical VAL3C/MTW atmosphere match figure.
        plt.figure(figsize=[6.47,4.0])
        import astropy.units as u
        from pysac.mhs_atmosphere.parameters.model_pars import paper1 as model_pars
        coords = atm.parameters.model_pars.get_coords(model_pars['Nxyz'], u.Quantity(model_pars['xyz']))
        empirical_data = atm.hs_atmosphere.read_VAL3c_MTW(mu=physical_constants['mu'])
        table = atm.hs_atmosphere.interpolate_atmosphere(empirical_data,
                                                         coords['Zext']
                                                         )
        rho = u.Quantity(table['rho'], copy=True).to('kg km-3').value
        Z = u.Quantity(table['Z'], copy=True).to('Mm').value
        p = u.Quantity(table['p'], copy=True).to('Pa').value
        plt.plot(Z,rho,'m-.',lw=2.0,label=r"density")
        plt.plot(Z,table['T'],'r:',lw=2.0,label=r"temperature")
        plt.plot(Z,p,'g-',lw=2.0,label=r"pressure")
        plt.gca().set_yscale('log',subsy=[5,10])
        plt.xlabel('Height [Mm]')
        plt.ylabel(r'$p$ [Pa], $T$ [K], $\rho$ [kg km$^{-3}$]')
        plt.legend(loc='center right')
        plt.subplots_adjust(bottom=0.125)
        plt.xlim(0,8.8)
        plt.savefig(figsdir+'data_match.eps')
|
|
"""Tests for Konnected Alarm Panel config flow."""
from unittest.mock import patch
import pytest
from homeassistant.components import konnected
from homeassistant.components.konnected import config_flow
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_panel")
async def mock_panel_fixture():
    """Mock a Konnected Panel bridge."""
    with patch("konnected.Client", autospec=True) as client:

        def fake_constructor(host, port, websession):
            """Record the host/port the panel client was constructed with."""
            client.host = host
            client.port = port
            return client

        client.side_effect = fake_constructor
        # Make the library's error type match what the flow catches.
        client.ClientError = config_flow.CannotConnect
        yield client
async def test_flow_works(hass, mock_panel):
    """Test config flow ."""
    # Start a user-initiated flow: the first step asks for host/port.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # Panel status used to derive the unique id (MAC without colons).
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "model": "Konnected",
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"port": 1234, "host": "1.2.3.4"}
    )
    # Second step: confirmation form with the device details filled in.
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel",
        "id": "112233445566",
        "host": "1.2.3.4",
        "port": 1234,
    }
    # Confirming creates the entry with a generated token and defaults.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    assert result["data"]["host"] == "1.2.3.4"
    assert result["data"]["port"] == 1234
    assert result["data"]["model"] == "Konnected"
    assert len(result["data"]["access_token"]) == 20  # confirm generated token size
    assert result["data"]["default_options"] == config_flow.OPTIONS_SCHEMA(
        {config_flow.CONF_IO: {}}
    )
async def test_pro_flow_works(hass, mock_panel):
    """Test the user-initiated config flow for a pro panel."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # pro uses chipId instead of MAC as unique id
    mock_panel.get_status.return_value = {
        "chipId": "1234567",
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"port": 1234, "host": "1.2.3.4"}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel Pro",
        "id": "1234567",
        "host": "1.2.3.4",
        "port": 1234,
    }

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    assert result["data"]["host"] == "1.2.3.4"
    assert result["data"]["port"] == 1234
    assert result["data"]["model"] == "Konnected Pro"
    assert len(result["data"]["access_token"]) == 20  # confirm generated token size
    assert result["data"]["default_options"] == config_flow.OPTIONS_SCHEMA(
        {config_flow.CONF_IO: {}}
    )
async def test_ssdp(hass, mock_panel):
    """Test a panel being discovered."""
    mock_panel.get_status.return_value = {
        "model": "Konnected",
        "mac": "11:22:33:44:55:66",
    }

    # Simulate an SSDP discovery of a standard panel.
    discovery_info = {
        "ssdp_location": "http://1.2.3.4:1234/Device.xml",
        "manufacturer": config_flow.KONN_MANUFACTURER,
        "modelName": config_flow.KONN_MODEL,
    }
    flow_result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "ssdp"}, data=discovery_info
    )

    # Discovery should land on the confirmation form carrying the panel
    # details parsed from the SSDP location and device status.
    assert flow_result["type"] == "form"
    assert flow_result["step_id"] == "confirm"
    expected_placeholders = {
        "model": "Konnected Alarm Panel",
        "id": "112233445566",
        "host": "1.2.3.4",
        "port": 1234,
    }
    assert flow_result["description_placeholders"] == expected_placeholders
async def test_import_no_host_user_finish(hass, mock_panel):
    """Test importing a panel with no host info."""
    mock_panel.get_status.return_value = {
        "mac": "aa:bb:cc:dd:ee:ff",
        "model": "Konnected Pro",
    }

    # Import carries only options and a device id - no host/port.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data={
            "default_options": {
                "blink": True,
                "discovery": True,
                "io": {
                    "1": "Disabled",
                    "10": "Disabled",
                    "11": "Disabled",
                    "12": "Disabled",
                    "2": "Disabled",
                    "3": "Disabled",
                    "4": "Disabled",
                    "5": "Disabled",
                    "6": "Disabled",
                    "7": "Disabled",
                    "8": "Disabled",
                    "9": "Disabled",
                    "alarm1": "Disabled",
                    "alarm2_out2": "Disabled",
                    "out": "Disabled",
                    "out1": "Disabled",
                },
            },
            "id": "aabbccddeeff",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "import_confirm"
    assert result["description_placeholders"]["id"] == "aabbccddeeff"

    # Confirming the import without a known host drops to the user step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # confirm user is prompted to enter host
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"host": "1.1.1.1", "port": 1234}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel Pro",
        "id": "aabbccddeeff",
        "host": "1.1.1.1",
        "port": 1234,
    }

    # final confirmation
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
async def test_import_ssdp_host_user_finish(hass, mock_panel):
    """Test importing a pro panel with no host info which ssdp discovers."""
    mock_panel.get_status.return_value = {
        "chipId": "somechipid",
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data={
            "default_options": {
                "blink": True,
                "discovery": True,
                "io": {
                    "1": "Disabled",
                    "10": "Disabled",
                    "11": "Disabled",
                    "12": "Disabled",
                    "2": "Disabled",
                    "3": "Disabled",
                    "4": "Disabled",
                    "5": "Disabled",
                    "6": "Disabled",
                    "7": "Disabled",
                    "8": "Disabled",
                    "9": "Disabled",
                    "alarm1": "Disabled",
                    "alarm2_out2": "Disabled",
                    "out": "Disabled",
                    "out1": "Disabled",
                },
            },
            "id": "somechipid",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "import_confirm"
    assert result["description_placeholders"]["id"] == "somechipid"

    # discover the panel via ssdp
    # A second flow for the same device must not start; instead the pending
    # import flow absorbs the discovered host information.
    ssdp_result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "ssdp"},
        data={
            "ssdp_location": "http://0.0.0.0:1234/Device.xml",
            "manufacturer": config_flow.KONN_MANUFACTURER,
            "modelName": config_flow.KONN_MODEL_PRO,
        },
    )
    assert ssdp_result["type"] == "abort"
    assert ssdp_result["reason"] == "already_in_progress"

    # Confirm the import; the host discovered by ssdp is now pre-filled.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel Pro",
        "id": "somechipid",
        "host": "0.0.0.0",
        "port": 1234,
    }

    # final confirmation
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
async def test_ssdp_already_configured(hass, mock_panel):
    """Test if a discovered panel has already been configured."""
    # Seed a config entry matching the unique id the discovery will produce.
    existing_entry = MockConfigEntry(
        domain="konnected",
        data={"host": "0.0.0.0", "port": 1234},
        unique_id="112233445566",
    )
    existing_entry.add_to_hass(hass)
    mock_panel.get_status.return_value = {
        "model": "Konnected Pro",
        "mac": "11:22:33:44:55:66",
    }

    discovery_info = {
        "ssdp_location": "http://0.0.0.0:1234/Device.xml",
        "manufacturer": config_flow.KONN_MANUFACTURER,
        "modelName": config_flow.KONN_MODEL_PRO,
    }
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "ssdp"}, data=discovery_info
    )

    # A flow for an already-configured unique id aborts immediately.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_ssdp_host_update(hass, mock_panel):
    """Test if a discovered panel has already been configured but changed host."""
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )

    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "2": "Binary Sensor",
                "6": "Binary Sensor",
                "10": "Binary Sensor",
                "3": "Digital Sensor",
                "7": "Digital Sensor",
                "11": "Binary Sensor",
                "4": "Switchable Output",
                "out1": "Switchable Output",
                "alarm1": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "2", "type": "door"},
                {"zone": "6", "type": "window", "name": "winder", "inverse": True},
                {"zone": "10", "type": "door"},
                {"zone": "11", "type": "window"},
            ],
            "sensors": [
                {"zone": "3", "type": "dht"},
                {"zone": "7", "type": "ds18b20", "name": "temper"},
            ],
            "switches": [
                {"zone": "4"},
                {
                    "zone": "8",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"zone": "out1"},
                {"zone": "alarm1"},
            ],
        }
    )

    MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    ).add_to_hass(hass)

    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }

    # Discover the same panel at a new address (1.1.1.1 vs stored 1.2.3.4).
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "ssdp"},
        data={
            "ssdp_location": "http://1.1.1.1:1234/Device.xml",
            "manufacturer": config_flow.KONN_MANUFACTURER,
            "modelName": config_flow.KONN_MODEL_PRO,
        },
    )
    assert result["type"] == "abort"

    # confirm the host value was updated, access_token was not
    entry = hass.config_entries.async_entries(config_flow.DOMAIN)[0]
    assert entry.data["host"] == "1.1.1.1"
    assert entry.data["port"] == 1234
    assert entry.data["access_token"] == "11223344556677889900"
async def test_import_existing_config(hass, mock_panel):
    """Test importing a host with an existing config file."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }

    # NOTE: some zones are given as ints on purpose - the YAML schema is
    # expected to coerce them to strings (verified in the final assertion).
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data=konnected.DEVICE_SCHEMA_YAML(
            {
                "host": "1.2.3.4",
                "port": 1234,
                "id": "112233445566",
                "binary_sensors": [
                    {"zone": "2", "type": "door"},
                    {"zone": 6, "type": "window", "name": "winder", "inverse": True},
                    {"zone": "10", "type": "door"},
                    {"zone": "11", "type": "window"},
                ],
                "sensors": [
                    {"zone": "3", "type": "dht"},
                    {"zone": 7, "type": "ds18b20", "name": "temper"},
                ],
                "switches": [
                    {"zone": "4"},
                    {
                        "zone": 8,
                        "name": "switcher",
                        "activation": "low",
                        "momentary": 50,
                        "pause": 100,
                        "repeat": 4,
                    },
                    {
                        "zone": 8,
                        "name": "alarm",
                        "activation": "low",
                        "momentary": 100,
                        "pause": 100,
                        "repeat": -1,
                    },
                    {"zone": "out1"},
                    {"zone": "alarm1"},
                ],
            }
        ),
    )

    assert result["type"] == "form"
    assert result["step_id"] == "confirm"

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    # The created entry mirrors the YAML config with schema defaults filled
    # in (inverse, poll_interval, activation) and unused zones disabled.
    assert result["data"] == {
        "host": "1.2.3.4",
        "port": 1234,
        "id": "112233445566",
        "model": "Konnected Pro",
        "access_token": result["data"]["access_token"],
        "default_options": {
            "io": {
                "1": "Disabled",
                "5": "Disabled",
                "9": "Disabled",
                "12": "Disabled",
                "out": "Disabled",
                "alarm2_out2": "Disabled",
                "2": "Binary Sensor",
                "6": "Binary Sensor",
                "10": "Binary Sensor",
                "3": "Digital Sensor",
                "7": "Digital Sensor",
                "11": "Binary Sensor",
                "4": "Switchable Output",
                "8": "Switchable Output",
                "out1": "Switchable Output",
                "alarm1": "Switchable Output",
            },
            "blink": True,
            "api_host": "",
            "discovery": True,
            "binary_sensors": [
                {"zone": "2", "type": "door", "inverse": False},
                {"zone": "6", "type": "window", "name": "winder", "inverse": True},
                {"zone": "10", "type": "door", "inverse": False},
                {"zone": "11", "type": "window", "inverse": False},
            ],
            "sensors": [
                {"zone": "3", "type": "dht", "poll_interval": 3},
                {"zone": "7", "type": "ds18b20", "name": "temper", "poll_interval": 3},
            ],
            "switches": [
                {"activation": "high", "zone": "4"},
                {
                    "zone": "8",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {
                    "zone": "8",
                    "name": "alarm",
                    "activation": "low",
                    "momentary": 100,
                    "pause": 100,
                    "repeat": -1,
                },
                {"activation": "high", "zone": "out1"},
                {"activation": "high", "zone": "alarm1"},
            ],
        },
    }
async def test_import_existing_config_entry(hass, mock_panel):
    """Test importing a host that has an existing config entry."""
    MockConfigEntry(
        domain="konnected",
        data={
            "host": "0.0.0.0",
            "port": 1111,
            "access_token": "ORIGINALTOKEN",
            "id": "112233445566",
            "extra": "something",
        },
        unique_id="112233445566",
    ).add_to_hass(hass)

    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }

    # utilize a global access token this time
    hass.data[config_flow.DOMAIN] = {"access_token": "SUPERSECRETTOKEN"}
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data={
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "default_options": {
                "blink": True,
                "discovery": True,
                "io": {
                    "1": "Disabled",
                    "10": "Binary Sensor",
                    "11": "Disabled",
                    "12": "Disabled",
                    "2": "Binary Sensor",
                    "3": "Disabled",
                    "4": "Disabled",
                    "5": "Disabled",
                    "6": "Binary Sensor",
                    "7": "Disabled",
                    "8": "Disabled",
                    "9": "Disabled",
                    "alarm1": "Disabled",
                    "alarm2_out2": "Disabled",
                    "out": "Disabled",
                    "out1": "Disabled",
                },
                "binary_sensors": [
                    {"inverse": False, "type": "door", "zone": "2"},
                    # NOTE(review): "Window" is capitalized unlike the other
                    # entries - presumably exercising case handling in the
                    # schema; confirm before normalizing.
                    {"inverse": True, "type": "Window", "name": "winder", "zone": "6"},
                    {"inverse": False, "type": "door", "zone": "10"},
                ],
            },
        },
    )
    assert result["type"] == "abort"

    # We should have updated the host info but not the access token
    assert len(hass.config_entries.async_entries("konnected")) == 1
    assert hass.config_entries.async_entries("konnected")[0].data == {
        "host": "1.2.3.4",
        "port": 1234,
        "access_token": "ORIGINALTOKEN",
        "id": "112233445566",
        "model": "Konnected Pro",
        "extra": "something",
    }
async def test_import_pin_config(hass, mock_panel):
    """Test importing a host with an existing config file that specifies pin configs."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "model": "Konnected Pro",
    }

    # Mixes legacy "pin" keys with "zone" keys; the schema is expected to
    # translate pins to their corresponding zones (see final assertion).
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data=konnected.DEVICE_SCHEMA_YAML(
            {
                "host": "1.2.3.4",
                "port": 1234,
                "id": "112233445566",
                "binary_sensors": [
                    {"pin": 1, "type": "door"},
                    {"pin": "2", "type": "window", "name": "winder", "inverse": True},
                    {"zone": "3", "type": "door"},
                ],
                "sensors": [
                    {"zone": 4, "type": "dht"},
                    {"pin": "7", "type": "ds18b20", "name": "temper"},
                ],
                "switches": [
                    {
                        "pin": "8",
                        "name": "switcher",
                        "activation": "low",
                        "momentary": 50,
                        "pause": 100,
                        "repeat": 4,
                    },
                    {"zone": "6"},
                ],
            }
        ),
    )

    assert result["type"] == "form"
    assert result["step_id"] == "confirm"

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    # pin 7 maps to zone 5 and pin 8 to zone "out" on this board layout.
    assert result["data"] == {
        "host": "1.2.3.4",
        "port": 1234,
        "id": "112233445566",
        "model": "Konnected Pro",
        "access_token": result["data"]["access_token"],
        "default_options": {
            "io": {
                "7": "Disabled",
                "8": "Disabled",
                "9": "Disabled",
                "10": "Disabled",
                "11": "Disabled",
                "12": "Disabled",
                "out1": "Disabled",
                "alarm1": "Disabled",
                "alarm2_out2": "Disabled",
                "1": "Binary Sensor",
                "2": "Binary Sensor",
                "3": "Binary Sensor",
                "4": "Digital Sensor",
                "5": "Digital Sensor",
                "6": "Switchable Output",
                "out": "Switchable Output",
            },
            "blink": True,
            "api_host": "",
            "discovery": True,
            "binary_sensors": [
                {"zone": "1", "type": "door", "inverse": False},
                {"zone": "2", "type": "window", "name": "winder", "inverse": True},
                {"zone": "3", "type": "door", "inverse": False},
            ],
            "sensors": [
                {"zone": "4", "type": "dht", "poll_interval": 3},
                {"zone": "5", "type": "ds18b20", "name": "temper", "poll_interval": 3},
            ],
            "switches": [
                {
                    "zone": "out",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"activation": "high", "zone": "6"},
            ],
        },
    }
async def test_option_flow(hass, mock_panel):
    """Test config flow options."""
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )

    device_options = config_flow.OPTIONS_SCHEMA({"io": {}})

    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"

    # Assign a role to each zone; the flow then walks through a
    # configuration step per assigned zone, in io order.
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "1": "Disabled",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "5": "Disabled",
            "6": "Binary Sensor",
            "out": "Switchable Output",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"
    assert result["description_placeholders"] == {
        "zone": "Zone 2",
    }

    # zone 2
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"
    assert result["description_placeholders"] == {
        "zone": "Zone 6",
    }

    # zone 6
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"type": "window", "name": "winder", "inverse": True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"
    assert result["description_placeholders"] == {
        "zone": "Zone 3",
    }

    # zone 3
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "dht"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    assert result["description_placeholders"] == {
        "zone": "Zone 4",
        "state": "1",
    }

    # zone 4
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    assert result["description_placeholders"] == {
        "zone": "OUT",
        "state": "1",
    }

    # zone out - answering "more_states": "Yes" repeats the switch step for
    # a second state on the same zone.
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "name": "switcher",
            "activation": "low",
            "momentary": 50,
            "pause": 100,
            "repeat": 4,
            "more_states": "Yes",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    assert result["description_placeholders"] == {
        "zone": "OUT",
        "state": "2",
    }

    # zone out - state 2
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "name": "alarm",
            "activation": "low",
            "momentary": 100,
            "pause": 100,
            "repeat": -1,
            "more_states": "No",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"

    # make sure we enforce url format
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "discovery": False,
            "blink": True,
            "override_api_host": True,
            "api_host": "badhosturl",
        },
    )
    # An invalid api_host re-presents the misc form instead of finishing.
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "discovery": False,
            "blink": True,
            "override_api_host": True,
            "api_host": "http://overridehost:1111",
        },
    )
    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "6": "Binary Sensor",
            "out": "Switchable Output",
        },
        "discovery": False,
        "blink": True,
        "api_host": "http://overridehost:1111",
        "binary_sensors": [
            {"zone": "2", "type": "door", "inverse": False},
            {"zone": "6", "type": "window", "name": "winder", "inverse": True},
        ],
        "sensors": [{"zone": "3", "type": "dht", "poll_interval": 3}],
        "switches": [
            {"activation": "high", "zone": "4"},
            {
                "zone": "out",
                "name": "switcher",
                "activation": "low",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
            {
                "zone": "out",
                "name": "alarm",
                "activation": "low",
                "momentary": 100,
                "pause": 100,
                "repeat": -1,
            },
        ],
    }
async def test_option_flow_pro(hass, mock_panel):
    """Test config flow options for pro board."""
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )

    device_options = config_flow.OPTIONS_SCHEMA({"io": {}})

    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "1": "Disabled",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "5": "Disabled",
            "6": "Binary Sensor",
            "7": "Digital Sensor",
        },
    )
    # The pro board has more zones, so a second io form follows.
    assert result["type"] == "form"
    assert result["step_id"] == "options_io_ext"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "8": "Switchable Output",
            "9": "Disabled",
            "10": "Binary Sensor",
            "11": "Binary Sensor",
            "12": "Disabled",
            "out1": "Switchable Output",
            "alarm1": "Switchable Output",
            "alarm2_out2": "Disabled",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"

    # zone 2
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"

    # zone 6
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"type": "window", "name": "winder", "inverse": True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"

    # zone 10
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"

    # zone 11
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "window"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"

    # zone 3
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "dht"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"

    # zone 7
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "ds18b20", "name": "temper"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"

    # zone 4
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"

    # zone 8
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "name": "switcher",
            "activation": "low",
            "momentary": 50,
            "pause": 100,
            "repeat": 4,
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"

    # zone out1
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"

    # zone alarm1
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"discovery": False, "blink": True, "override_api_host": False},
    )
    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {
            "10": "Binary Sensor",
            "11": "Binary Sensor",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "6": "Binary Sensor",
            "7": "Digital Sensor",
            "8": "Switchable Output",
            "alarm1": "Switchable Output",
            "out1": "Switchable Output",
        },
        "discovery": False,
        "blink": True,
        "api_host": "",
        "binary_sensors": [
            {"zone": "2", "type": "door", "inverse": False},
            {"zone": "6", "type": "window", "name": "winder", "inverse": True},
            {"zone": "10", "type": "door", "inverse": False},
            {"zone": "11", "type": "window", "inverse": False},
        ],
        "sensors": [
            {"zone": "3", "type": "dht", "poll_interval": 3},
            {"zone": "7", "type": "ds18b20", "name": "temper", "poll_interval": 3},
        ],
        "switches": [
            {"activation": "high", "zone": "4"},
            {
                "zone": "8",
                "name": "switcher",
                "activation": "low",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
            {"activation": "high", "zone": "out1"},
            {"activation": "high", "zone": "alarm1"},
        ],
    }
async def test_option_flow_import(hass, mock_panel):
    """Test config flow options imported from configuration.yaml."""
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Digital Sensor",
                "3": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "window", "name": "winder", "inverse": True},
            ],
            "sensors": [{"zone": "2", "type": "ds18b20", "name": "temper"}],
            "switches": [
                {
                    "zone": "3",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {
                    "zone": "3",
                    "name": "alarm",
                    "activation": "low",
                    "momentary": 100,
                    "pause": 100,
                    "repeat": -1,
                },
            ],
        }
    )

    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": device_options,
        }
    )

    entry = MockConfigEntry(
        domain="konnected", data=device_config, unique_id="112233445566"
    )
    entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"

    # confirm the defaults are set based on current config - we'll spot check this throughout
    schema = result["data_schema"]({})
    assert schema["1"] == "Binary Sensor"
    assert schema["2"] == "Digital Sensor"
    assert schema["3"] == "Switchable Output"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "1": "Binary Sensor",
            "2": "Digital Sensor",
            "3": "Switchable Output",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io_ext"
    # Extended zones default to Disabled when not in the imported config.
    schema = result["data_schema"]({})
    assert schema["8"] == "Disabled"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"

    # zone 1 - defaults pre-filled from the imported options
    schema = result["data_schema"]({})
    assert schema["type"] == "window"
    assert schema["name"] == "winder"
    assert schema["inverse"] is True

    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"

    # zone 2
    schema = result["data_schema"]({})
    assert schema["type"] == "ds18b20"
    assert schema["name"] == "temper"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"type": "dht"},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"

    # zone 3
    schema = result["data_schema"]({})
    assert schema["name"] == "switcher"
    assert schema["activation"] == "low"
    assert schema["momentary"] == 50
    assert schema["pause"] == 100
    assert schema["repeat"] == 4
    assert schema["more_states"] == "Yes"

    # Answering "No" drops the imported second switch state for this zone.
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"activation": "high", "more_states": "No"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"
    schema = result["data_schema"]({})
    assert schema["blink"] is True
    assert schema["discovery"] is True

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"discovery": True, "blink": False, "override_api_host": False},
    )

    # verify the updated fields
    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {"1": "Binary Sensor", "2": "Digital Sensor", "3": "Switchable Output"},
        "discovery": True,
        "blink": False,
        "api_host": "",
        "binary_sensors": [
            {"zone": "1", "type": "door", "inverse": True, "name": "winder"},
        ],
        "sensors": [
            {"zone": "2", "type": "dht", "poll_interval": 3, "name": "temper"},
        ],
        "switches": [
            {
                "zone": "3",
                "name": "switcher",
                "activation": "high",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
        ],
    }
async def test_option_flow_existing(hass, mock_panel):
    """Test config flow options with existing already in place."""
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Digital Sensor",
                "3": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "window", "name": "winder", "inverse": True},
            ],
            "sensors": [{"zone": "2", "type": "ds18b20", "name": "temper"}],
            "switches": [
                {
                    "zone": "3",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
            ],
        }
    )

    # Entry options are populated (unlike default_options, which are empty);
    # the options flow should seed its defaults from the entry options.
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({"io": {}}),
        }
    )

    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"

    # confirm the defaults are pulled in from the existing options
    schema = result["data_schema"]({})
    assert schema["1"] == "Binary Sensor"
    assert schema["2"] == "Digital Sensor"
    assert schema["3"] == "Switchable Output"
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from functools import cmp_to_key
import pydoc
from docutils import core
from docutils import nodes
import six
from sphinx.util import compat
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import support
# Map OpenStack release version strings to their code names; used when
# rendering "available since <version>" annotations in the generated docs.
_CODE_NAMES = {'2013.1': 'Grizzly',
               '2013.2': 'Havana',
               '2014.1': 'Icehouse',
               '2014.2': 'Juno',
               '2015.1': 'Kilo',
               '5.0.0': 'Liberty',
               '6.0.0': 'Mitaka'}

# Resource classes discovered for documentation, populated elsewhere in
# this module.
all_resources = {}
class integratedrespages(nodes.General, nodes.Element):
    """Placeholder doctree node for the integrated resources pages."""
    pass
class unsupportedrespages(nodes.General, nodes.Element):
    """Placeholder doctree node for the unsupported resources pages."""
    pass
class contribresourcepages(nodes.General, nodes.Element):
    """Placeholder doctree node for the contributed resources pages."""
    pass
class ResourcePages(compat.Directive):
    # Sphinx directive configuration: no body content; at most one optional
    # argument (a resource-type prefix used to filter which resources are
    # rendered) and no options.
    has_content = False
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {}
def path(self):
return None
def statuses(self):
return support.SUPPORT_STATUSES
def run(self):
prefix = self.arguments and self.arguments.pop() or None
content = []
for resource_type, resource_classes in _filter_resources(
prefix, self.path(), self.statuses()):
for resource_class in resource_classes:
self.resource_type = resource_type
self.resource_class = resource_class
section = self._section(content, resource_type, '%s')
self.props_schemata = properties.schemata(
self.resource_class.properties_schema)
self.attrs_schemata = attributes.schemata(
self.resource_class.attributes_schema)
# NOTE(prazumovsky): Adding base_attributes_schema dict to
# Resource class should means adding new attributes from this
# dict to documentation of each resource, else there is no
# chance to learn about base attributes.
self.attrs_schemata.update(
self.resource_class.base_attributes_schema)
self.update_policy_schemata = properties.schemata(
self.resource_class.update_policy_schema)
self._status_str(resource_class.support_status, section)
cls_doc = pydoc.getdoc(resource_class)
if cls_doc:
# allow for rst in the class comments
cls_nodes = core.publish_doctree(cls_doc).children
section.extend(cls_nodes)
self.contribute_properties(section)
self.contribute_attributes(section)
self.contribute_update_policy(section)
self.contribute_hot_syntax(section)
return content
def _version_str(self, version):
if version in _CODE_NAMES:
return _("%(version)s (%(code)s)") % {'version': version,
'code': _CODE_NAMES[version]}
else:
return version
    def _status_str(self, support_status, section):
        """Append note paragraphs describing a resource's support history.

        Walks the chain of support statuses via ``previous_status`` and
        adds one note per status, except the trivial "supported with no
        version" case, which is omitted.
        """
        while support_status is not None:
            sstatus = support_status.to_dict()
            if sstatus['status'] is support.SUPPORTED:
                msg = _('Available')
            else:
                msg = sstatus['status']
            # Append "since <version>" and any free-form status message.
            if sstatus['version'] is not None:
                msg = _('%(msg)s since %(version)s') % {
                    'msg': msg,
                    'version': self._version_str(sstatus['version'])}
            if sstatus['message'] is not None:
                msg = _('%(msg)s - %(status_msg)s') % {
                    'msg': msg,
                    'status_msg': sstatus['message']}

            # Skip the note entirely for a plain supported status with no
            # version information - it would carry no content.
            if not (sstatus['status'] == support.SUPPORTED and
                    sstatus['version'] is None):
                para = nodes.paragraph('', msg)
                note = nodes.note('', para)
                section.append(note)
            support_status = support_status.previous_status
def _section(self, parent, title, id_pattern):
id = id_pattern % self.resource_type
section = nodes.section(ids=[id])
parent.append(section)
title = nodes.title('', title)
section.append(title)
return section
def _prop_syntax_example(self, prop):
if not prop:
return 'Value'
if prop.type == properties.Schema.LIST:
def schema(i):
return prop.schema[i] if prop.schema else None
sub_type = [self._prop_syntax_example(schema(i))
for i in range(2)]
return '[%s, %s, ...]' % tuple(sub_type)
elif prop.type == properties.Schema.MAP:
def sub_props():
for sub_key, sub_value in prop.schema.items():
if sub_value.implemented:
yield '"%s": %s' % (
sub_key, self._prop_syntax_example(sub_value))
return '{%s}' % (', '.join(sub_props()) if prop.schema else '...')
else:
return prop.type
def contribute_hot_syntax(self, parent):
section = self._section(parent, _('HOT Syntax'), '%s-hot')
props = []
for prop_key in sorted(six.iterkeys(self.props_schemata)):
prop = self.props_schemata[prop_key]
if (prop.implemented
and prop.support_status.status == support.SUPPORTED):
props.append('%s: %s' % (prop_key,
self._prop_syntax_example(prop)))
props_str = ''
if props:
props_str = '''\n properties:
%s''' % ('\n '.join(props))
template = '''heat_template_version: 2015-04-30
...
resources:
...
the_resource:
type: %s%s''' % (self.resource_type, props_str)
block = nodes.literal_block(template, template, language="yaml")
section.append(block)
@staticmethod
def cmp_prop(x, y):
x_key, x_prop = x
y_key, y_prop = y
if x_prop.support_status.status == y_prop.support_status.status:
return (x_key > y_key) - (x_key < y_key)
x_status = x_prop.support_status.status
y_status = y_prop.support_status.status
if x_status == support.SUPPORTED:
return -1
if x_status == support.DEPRECATED:
return 1
return (x_status > y_status) - (x_status < y_status)
    def contribute_property(self, parent, prop_key, prop, upd_para=None,
                            id_pattern_prefix=None):
        """Document a single property schema as a docutils section.

        Recurses into MAP/LIST sub-schemas.  *upd_para* lets a parent
        property's update-policy paragraph be reused for its children;
        *id_pattern_prefix* keeps nested section ids unique.
        """
        if not id_pattern_prefix:
            id_pattern_prefix = '%s-prop'
        id_pattern = id_pattern_prefix + '-' + prop_key
        definition = self._section(parent, prop_key, id_pattern)
        self._status_str(prop.support_status, definition)
        # Unimplemented properties get a bare note and nothing else.
        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return
        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)
        type = nodes.paragraph('', _('%s value expected.') % prop.type)
        definition.append(type)
        # Reuse the parent's update paragraph when recursing, otherwise
        # derive it from the schema's update flags.
        if upd_para is not None:
            definition.append(upd_para)
        else:
            if prop.update_allowed:
                upd_para = nodes.paragraph(
                    '', _('Can be updated without replacement.'))
                definition.append(upd_para)
            elif prop.immutable:
                upd_para = nodes.paragraph('', _('Updates are not supported. '
                                                 'Resource update will fail on'
                                                 ' any attempt to update this '
                                                 'property.'))
                definition.append(upd_para)
            else:
                upd_para = nodes.paragraph('', _('Updates cause replacement.'))
                definition.append(upd_para)
        if prop.default is not None:
            para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
            definition.append(para)
        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)
        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema
        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema
        if sub_schema:
            # Recurse into visible sub-properties, indented one level.
            for _key, _prop in sorted(sub_schema.items(),
                                      key=cmp_to_key(self.cmp_prop)):
                if _prop.support_status.status != support.HIDDEN:
                    indent = nodes.block_quote()
                    definition.append(indent)
                    self.contribute_property(
                        indent, _key, _prop, upd_para, id_pattern)
def contribute_properties(self, parent):
if not self.props_schemata:
return
props = dict((k, v) for k, v in self.props_schemata.items()
if v.support_status.status != support.HIDDEN)
required_props = dict((k, v) for k, v in props.items()
if v.required)
if required_props:
section = self._section(
parent, _('Required Properties'), '%s-props-req')
for prop_key, prop in sorted(required_props.items(),
key=cmp_to_key(self.cmp_prop)):
self.contribute_property(section, prop_key, prop)
optional_props = dict((k, v) for k, v in props.items()
if not v.required)
if optional_props:
section = self._section(
parent, _('Optional Properties'), '%s-props-opt')
for prop_key, prop in sorted(optional_props.items(),
key=cmp_to_key(self.cmp_prop)):
self.contribute_property(section, prop_key, prop)
def contribute_attributes(self, parent):
if not self.attrs_schemata:
return
section = self._section(parent, _('Attributes'), '%s-attrs')
for prop_key, prop in sorted(self.attrs_schemata.items()):
if prop.support_status.status != support.HIDDEN:
description = prop.description
attr_section = self._section(
section, prop_key, '%s-attr-' + prop_key)
self._status_str(prop.support_status, attr_section)
if description:
def_para = nodes.paragraph('', description)
attr_section.append(def_para)
def contribute_update_policy(self, parent):
if not self.update_policy_schemata:
return
section = self._section(parent, _('update_policy'), '%s-updpolicy')
for _key, _prop in sorted(self.update_policy_schemata.items(),
key=cmp_to_key(self.cmp_prop)):
self.contribute_property(section, _key, _prop)
class IntegrateResourcePages(ResourcePages):
    """Directive that renders only resources with SUPPORTED status."""

    def path(self):
        return 'heat.engine.resources'

    def statuses(self):
        return [support.SUPPORTED]
class UnsupportedResourcePages(ResourcePages):
    """Directive that renders resources in any status except SUPPORTED."""

    def path(self):
        return 'heat.engine.resources'

    def statuses(self):
        unsupported = [s for s in support.SUPPORT_STATUSES
                       if s != support.SUPPORTED]
        return unsupported
class ContribResourcePages(ResourcePages):
    """Directive that renders resources shipped as contrib plugins."""

    def path(self):
        return 'heat.engine.plugins'
def _filter_resources(prefix=None, path=None, statuses=None):
    """Return sorted (name, [classes]) pairs from ``all_resources``.

    A class is kept when its name matches *prefix*, its module matches
    *path*, its status is in *statuses*, and it is not HIDDEN.
    """
    statuses = statuses or []

    def wanted(name, cls):
        status = cls.support_status.status
        return ((prefix is None or name.startswith(prefix))
                and (path is None or cls.__module__.startswith(path))
                and status in statuses
                and status != support.HIDDEN)

    filtered_resources = {}
    for name in sorted(six.iterkeys(all_resources)):
        for cls in all_resources[name]:
            if wanted(name, cls):
                filtered_resources.setdefault(name, []).append(cls)
    return sorted(six.iteritems(filtered_resources))
def _load_all_resources():
    """Populate the module-level ``all_resources`` map from heat plugins."""
    manager = plugin_manager.PluginManager('heat.engine.resources')
    resource_mapping = plugin_manager.PluginMapping('resource')
    for name, cls in resource_mapping.load_all(manager):
        # Several plugin classes may register under the same resource name.
        all_resources.setdefault(name, []).append(cls)
def link_resource(app, env, node, contnode):
    """Sphinx missing-reference handler resolving resource type names.

    Returns a reference node into the CFN or OpenStack template guide,
    or None (implicitly) when the target is not a known resource.
    """
    reftarget = node.attributes['reftarget']
    target_lower = reftarget.lower()
    for resource_name in all_resources:
        if resource_name.lower() != target_lower:
            continue
        refnode = nodes.reference('', '', internal=True)
        refnode['reftitle'] = resource_name
        # AWS-compatible resources live on the CFN guide page.
        if resource_name.startswith('AWS'):
            source = 'template_guide/cfn'
        else:
            source = 'template_guide/openstack'
        uri = app.builder.get_relative_uri(
            node.attributes['refdoc'], source)
        refnode['refuri'] = '%s#%s' % (uri, resource_name)
        refnode.append(contnode)
        return refnode
def setup(app):
    """Sphinx extension entry point: register nodes, directives, resolver."""
    _load_all_resources()
    registrations = (
        (integratedrespages, 'integratedrespages', IntegrateResourcePages),
        (unsupportedrespages, 'unsupportedrespages', UnsupportedResourcePages),
        (contribresourcepages, 'contribrespages', ContribResourcePages),
    )
    for node_cls, directive_name, directive_cls in registrations:
        app.add_node(node_cls)
        app.add_directive(directive_name, directive_cls)
    app.connect('missing-reference', link_resource)
|
|
from sympy import (
adjoint, And, Basic, conjugate, diff, expand, Eq, Function, I, im,
Integral, integrate, Interval, lambdify, log, Max, Min, oo, Or, pi,
Piecewise, piecewise_fold, Rational, re, solve, symbols, transpose,
cos, exp, Abs, Not
)
from sympy.utilities.pytest import XFAIL, raises
# Symbols shared by all tests below; z carries a nonzero assumption.
x, y = symbols('x y')
z = symbols('z', nonzero=True)
def test_piecewise():
    """Core Piecewise behaviour: canonicalization, subs, evalf, doit,
    differentiation, arithmetic, powers, _eval_interval and integration."""
    # Test canonization
    assert Piecewise((x, x < 1), (0, True)) == Piecewise((x, x < 1), (0, True))
    assert Piecewise((x, x < 1), (0, True), (1, True)) == \
        Piecewise((x, x < 1), (0, True))
    assert Piecewise((x, x < 1), (0, False), (-1, 1 > 2)) == \
        Piecewise((x, x < 1))
    assert Piecewise((x, x < 1), (0, x < 1), (0, True)) == \
        Piecewise((x, x < 1), (0, True))
    assert Piecewise((x, x < 1), (0, x < 2), (0, True)) == \
        Piecewise((x, x < 1), (0, True))
    assert Piecewise((x, x < 1), (x, x < 2), (0, True)) == \
        Piecewise((x, Or(x < 1, x < 2)), (0, True))
    assert Piecewise((x, x < 1), (x, x < 2), (x, True)) == x
    assert Piecewise((x, True)) == x
    raises(TypeError, lambda: Piecewise(x))
    raises(TypeError, lambda: Piecewise((x, x**2)))
    # Test subs
    p = Piecewise((-1, x < -1), (x**2, x < 0), (log(x), x >= 0))
    p_x2 = Piecewise((-1, x**2 < -1), (x**4, x**2 < 0), (log(x**2), x**2 >= 0))
    assert p.subs(x, x**2) == p_x2
    assert p.subs(x, -5) == -1
    assert p.subs(x, -1) == 1
    assert p.subs(x, 1) == log(1)
    # More subs tests
    p2 = Piecewise((1, x < pi), (-1, x < 2*pi), (0, x > 2*pi))
    p3 = Piecewise((1, Eq(x, 0)), (1/x, True))
    p4 = Piecewise((1, Eq(x, 0)), (2, 1/x>2))
    assert p2.subs(x, 2) == 1
    assert p2.subs(x, 4) == -1
    assert p2.subs(x, 10) == 0
    assert p3.subs(x, 0.0) == 1
    assert p4.subs(x, 0.0) == 1
    f, g, h = symbols('f,g,h', cls=Function)
    pf = Piecewise((f(x), x < -1), (f(x) + h(x) + 2, x <= 1))
    pg = Piecewise((g(x), x < -1), (g(x) + h(x) + 2, x <= 1))
    assert pg.subs(g, f) == pf
    assert Piecewise((1, Eq(x, 0)), (0, True)).subs(x, 0) == 1
    assert Piecewise((1, Eq(x, 0)), (0, True)).subs(x, 1) == 0
    assert Piecewise((1, Eq(x, y)), (0, True)).subs(x, y) == 1
    assert Piecewise((1, Eq(x, z)), (0, True)).subs(x, z) == 1
    assert Piecewise((1, Eq(exp(x), cos(z))), (0, True)).subs(x, z) == \
        Piecewise((1, Eq(exp(z), cos(z))), (0, True))
    assert Piecewise((1, Eq(x, y*(y + 1))), (0, True)).subs(x, y**2 + y) == 1
    p5 = Piecewise( (0, Eq(cos(x) + y, 0)), (1, True))
    assert p5.subs(y, 0) == Piecewise( (0, Eq(cos(x), 0)), (1, True))
    # Test evalf
    assert p.evalf() == p
    assert p.evalf(subs={x: -2}) == -1
    assert p.evalf(subs={x: -1}) == 1
    assert p.evalf(subs={x: 1}) == log(1)
    # Test doit
    f_int = Piecewise((Integral(x, (x, 0, 1)), x < 1))
    assert f_int.doit() == Piecewise( (1.0/2.0, x < 1) )
    # Test differentiation
    f = x
    fp = x*p
    dp = Piecewise((0, x < -1), (2*x, x < 0), (1/x, x >= 0))
    fp_dx = x*dp + p
    assert diff(p, x) == dp
    assert diff(f*p, x) == fp_dx
    # Test simple arithmetic
    assert x*p == fp
    assert x*p + p == p + x*p
    assert p + f == f + p
    assert p + dp == dp + p
    assert p - dp == -(dp - p)
    # Test power
    dp2 = Piecewise((0, x < -1), (4*x**2, x < 0), (1/x**2, x >= 0))
    assert dp**2 == dp2
    # Test _eval_interval
    f1 = x*y + 2
    f2 = x*y**2 + 3
    peval = Piecewise( (f1, x < 0), (f2, x > 0))
    peval_interval = f1.subs(
        x, 0) - f1.subs(x, -1) + f2.subs(x, 1) - f2.subs(x, 0)
    assert peval._eval_interval(x, 0, 0) == 0
    assert peval._eval_interval(x, -1, 1) == peval_interval
    peval2 = Piecewise((f1, x < 0), (f2, True))
    assert peval2._eval_interval(x, 0, 0) == 0
    assert peval2._eval_interval(x, 1, -1) == -peval_interval
    assert peval2._eval_interval(x, -1, -2) == f1.subs(x, -2) - f1.subs(x, -1)
    assert peval2._eval_interval(x, -1, 1) == peval_interval
    assert peval2._eval_interval(x, None, 0) == peval2.subs(x, 0)
    assert peval2._eval_interval(x, -1, None) == -peval2.subs(x, -1)
    # Test integration
    p_int = Piecewise((-x, x < -1), (x**3/3.0, x < 0), (-x + x*log(x), x >= 0))
    assert integrate(p, x) == p_int
    p = Piecewise((x, x < 1), (x**2, -1 <= x), (x, 3 < x))
    assert integrate(p, (x, -2, 2)) == 5.0/6.0
    assert integrate(p, (x, 2, -2)) == -5.0/6.0
    p = Piecewise((0, x < 0), (1, x < 1), (0, x < 2), (1, x < 3), (0, True))
    assert integrate(p, (x, -oo, oo)) == 2
    p = Piecewise((x, x < -10), (x**2, x <= -1), (x, 1 < x))
    raises(ValueError, lambda: integrate(p, (x, -2, 2)))
    # Test commutativity
    assert p.is_commutative is True
def test_piecewise_free_symbols():
    """free_symbols collects symbols from both expressions and conditions."""
    a = symbols('a')
    f = Piecewise((x, a < 0), (y, True))
    # Idiom fix: set literal instead of set([...]).
    assert f.free_symbols == {x, y, a}
def test_piecewise_integrate():
    """Definite/indefinite integration of nested and overlapping Piecewise
    expressions, including reversed limits and symbolic endpoints."""
    # XXX Use '<=' here! '>=' is not yet implemented ..
    f = Piecewise(((x - 2)**2, 0 <= x), (1, True))
    assert integrate(f, (x, -2, 2)) == Rational(14, 3)
    g = Piecewise(((x - 5)**5, 4 <= x), (f, True))
    assert integrate(g, (x, -2, 2)) == Rational(14, 3)
    assert integrate(g, (x, -2, 5)) == Rational(43, 6)
    g = Piecewise(((x - 5)**5, 4 <= x), (f, x < 4))
    assert integrate(g, (x, -2, 2)) == Rational(14, 3)
    assert integrate(g, (x, -2, 5)) == Rational(43, 6)
    g = Piecewise(((x - 5)**5, 2 <= x), (f, x < 2))
    assert integrate(g, (x, -2, 2)) == Rational(14, 3)
    assert integrate(g, (x, -2, 5)) == -Rational(701, 6)
    g = Piecewise(((x - 5)**5, 2 <= x), (f, True))
    assert integrate(g, (x, -2, 2)) == Rational(14, 3)
    assert integrate(g, (x, -2, 5)) == -Rational(701, 6)
    g = Piecewise(((x - 5)**5, 2 <= x), (2 * f, True))
    assert integrate(g, (x, -2, 2)) == 2 * Rational(14, 3)
    assert integrate(g, (x, -2, 5)) == -Rational(673, 6)
    g = Piecewise((1, x > 0), (0, Eq(x, 0)), (-1, x < 0))
    assert integrate(g, (x, -1, 1)) == 0
    # Integration over a variable appearing only in the condition.
    g = Piecewise((1, x - y < 0), (0, True))
    assert integrate(g, (y, -oo, 0)) == -Min(0, x)
    assert integrate(g, (y, 0, oo)) == oo - Max(0, x)
    assert integrate(g, (y, -oo, oo)) == oo - x
    g = Piecewise((0, x < 0), (x, x <= 1), (1, True))
    assert integrate(g, (x, -5, 1)) == Rational(1, 2)
    assert integrate(g, (x, -5, y)).subs(y, 1) == Rational(1, 2)
    assert integrate(g, (x, y, 1)).subs(y, -5) == Rational(1, 2)
    assert integrate(g, (x, 1, -5)) == -Rational(1, 2)
    assert integrate(g, (x, 1, y)).subs(y, -5) == -Rational(1, 2)
    assert integrate(g, (x, y, -5)).subs(y, 1) == -Rational(1, 2)
    assert integrate(g, (x, -5, y)) == Piecewise((0, y < 0),
        (y**2/2, y <= 1), (y - 0.5, True))
    assert integrate(g, (x, y, 1)) == Piecewise((0.5, y < 0),
        (0.5 - y**2/2, y <= 1), (1 - y, True))
    # Conditions expressed via Interval.contains.
    g = Piecewise((1 - x, Interval(0, 1).contains(x)),
        (1 + x, Interval(-1, 0).contains(x)), (0, True))
    assert integrate(g, (x, -5, 1)) == 1
    assert integrate(g, (x, -5, y)).subs(y, 1) == 1
    assert integrate(g, (x, y, 1)).subs(y, -5) == 1
    assert integrate(g, (x, 1, -5)) == -1
    assert integrate(g, (x, 1, y)).subs(y, -5) == -1
    assert integrate(g, (x, y, -5)).subs(y, 1) == -1
    assert integrate(g, (x, -5, y)) == Piecewise(
        (-y**2/2 + y + 0.5, Interval(0, 1).contains(y)),
        (y**2/2 + y + 0.5, Interval(-1, 0).contains(y)),
        (0, y <= -1), (1, True))
    assert integrate(g, (x, y, 1)) == Piecewise(
        (y**2/2 - y + 0.5, Interval(0, 1).contains(y)),
        (-y**2/2 - y + 0.5, Interval(-1, 0).contains(y)),
        (1, y <= -1), (0, True))
    # Same shape, conditions expressed with Or/inequalities.
    g = Piecewise((0, Or(x <= -1, x >= 1)), (1 - x, x > 0), (1 + x, True))
    assert integrate(g, (x, -5, 1)) == 1
    assert integrate(g, (x, -5, y)).subs(y, 1) == 1
    assert integrate(g, (x, y, 1)).subs(y, -5) == 1
    assert integrate(g, (x, 1, -5)) == -1
    assert integrate(g, (x, 1, y)).subs(y, -5) == -1
    assert integrate(g, (x, y, -5)).subs(y, 1) == -1
    assert integrate(g, (x, -5, y)) == Piecewise((0, y <= -1), (1, y >= 1),
        (-y**2/2 + y + 0.5, y > 0), (y**2/2 + y + 0.5, True))
    assert integrate(g, (x, y, 1)) == Piecewise((1, y <= -1), (0, y >= 1),
        (y**2/2 - y + 0.5, y > 0), (-y**2/2 - y + 0.5, True))
def test_piecewise_integrate_inequality_conditions():
    """Integration with conditions that are linear inequalities in
    positive symbolic coefficients."""
    c1, c2 = symbols("c1 c2", positive=True)
    g = Piecewise((0, c1*x > 1), (1, c1*x > 0), (0, True))
    assert integrate(g, (x, -oo, 0)) == 0
    assert integrate(g, (x, -5, 0)) == 0
    assert integrate(g, (x, 0, 5)) == Min(5, 1/c1)
    assert integrate(g, (x, 0, oo)) == 1/c1
    # Same, with a second symbol in the condition, evaluated at y = 0.
    g = Piecewise((0, c1*x + c2*y > 1), (1, c1*x + c2*y > 0), (0, True))
    assert integrate(g, (x, -oo, 0)).subs(y, 0) == 0
    assert integrate(g, (x, -5, 0)).subs(y, 0) == 0
    assert integrate(g, (x, 0, 5)).subs(y, 0) == Min(5, 1/c1)
    assert integrate(g, (x, 0, oo)).subs(y, 0) == 1/c1
def test_piecewise_integrate_symbolic_conditions():
    """Equivalent indicator-style Piecewise forms integrate to the same
    Min/Max expressions over symbolic bounds."""
    from sympy.abc import a, b, x, y
    p0 = Piecewise((0, Or(x < a, x > b)), (1, True))
    p1 = Piecewise((0, x < a), (0, x > b), (1, True))
    p2 = Piecewise((0, x > b), (0, x < a), (1, True))
    p3 = Piecewise((0, x < a), (1, x < b), (0, True))
    p4 = Piecewise((0, x > b), (1, x > a), (0, True))
    p5 = Piecewise((1, And(a < x, x < b)), (0, True))
    assert integrate(p0, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p1, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p2, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p3, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p4, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p5, (x, -oo, y)) == Min(b, y) - Min(a, b, y)
    assert integrate(p0, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p1, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p2, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p3, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p4, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p5, (x, y, oo)) == Max(a, b, y) - Max(a, y)
    assert integrate(p0, x) == Piecewise((0, Or(x < a, x > b)), (x, True))
    assert integrate(p1, x) == Piecewise((0, Or(x < a, x > b)), (x, True))
    assert integrate(p2, x) == Piecewise((0, Or(x < a, x > b)), (x, True))
    # Variants where the outer value is 0.5 instead of 0.
    p1 = Piecewise((0, x < a), (0.5, x > b), (1, True))
    p2 = Piecewise((0.5, x > b), (0, x < a), (1, True))
    p3 = Piecewise((0, x < a), (1, x < b), (0.5, True))
    p4 = Piecewise((0.5, x > b), (1, x > a), (0, True))
    p5 = Piecewise((1, And(a < x, x < b)), (0.5, x > b), (0, True))
    assert integrate(p1, (x, -oo, y)) == 0.5*y + 0.5*Min(b, y) - Min(a, b, y)
    assert integrate(p2, (x, -oo, y)) == 0.5*y + 0.5*Min(b, y) - Min(a, b, y)
    assert integrate(p3, (x, -oo, y)) == 0.5*y + 0.5*Min(b, y) - Min(a, b, y)
    assert integrate(p4, (x, -oo, y)) == 0.5*y + 0.5*Min(b, y) - Min(a, b, y)
    assert integrate(p5, (x, -oo, y)) == 0.5*y + 0.5*Min(b, y) - Min(a, b, y)
def test_piecewise_integrate_independent_conditions():
    """Conditions independent of the integration variable pass through."""
    p = Piecewise((0, Eq(y, 0)), (x*y, True))
    assert integrate(p, (x, 1, 3)) == \
        Piecewise((0, Eq(y, 0)), (4*y, True))
def test_piecewise_simplify():
    """simplify() acts on both the expressions and the conditions."""
    p = Piecewise(((x**2 + 1)/x**2, Eq(x*(1 + x) - x**2, 0)),
        ((-1)**x*(-1), True))
    assert p.simplify() == \
        Piecewise((1 + 1/x**2, Eq(x, 0)), ((-1)**(x + 1), True))
def test_piecewise_solve():
    """solve() finds roots of Piecewise expressions, respecting the
    condition under which each branch is active."""
    abs2 = Piecewise((-x, x <= 0), (x, x > 0))
    f = abs2.subs(x, x - 2)
    assert solve(f, x) == [2]
    assert solve(f - 1, x) == [1, 3]
    f = Piecewise(((x - 2)**2, x >= 0), (1, True))
    assert solve(f, x) == [2]
    g = Piecewise(((x - 5)**5, x >= 4), (f, True))
    assert solve(g, x) == [2, 5]
    g = Piecewise(((x - 5)**5, x >= 4), (f, x < 4))
    assert solve(g, x) == [2, 5]
    # The x == 2 root of f is excluded when f is only active for x < 2.
    g = Piecewise(((x - 5)**5, x >= 2), (f, x < 2))
    assert solve(g, x) == [5]
    g = Piecewise(((x - 5)**5, x >= 2), (f, True))
    assert solve(g, x) == [5]
    g = Piecewise(((x - 5)**5, x >= 2), (f, True), (10, False))
    assert solve(g, x) == [5]
    g = Piecewise(((x - 5)**5, x >= 2),
        (-x + 2, x - 2 <= 0), (x - 2, x - 2 > 0))
    assert solve(g, x) == [5]
# See issue 1253 (enhance the solver to handle inequalities).
@XFAIL
def test_piecewise_solve2():
    """Expected to fail: solve() cannot yet return interval solutions."""
    f = Piecewise(((x - 2)**2, x >= 0), (0, True))
    assert solve(f, x) == [2, Interval(0, oo, True, True)]
def test_piecewise_fold():
    """piecewise_fold() pushes arithmetic inside Piecewise branches."""
    p = Piecewise((x, x < 1), (1, 1 <= x))
    assert piecewise_fold(x*p) == Piecewise((x**2, x < 1), (x, 1 <= x))
    assert piecewise_fold(p + p) == Piecewise((2*x, x < 1), (2, 1 <= x))
    assert piecewise_fold(Piecewise((1, x < 0), (2, True))
        + Piecewise((10, x < 0), (-10, True))) == \
        Piecewise((11, x < 0), (-8, True))
    p1 = Piecewise((0, x < 0), (x, x <= 1), (0, True))
    p2 = Piecewise((0, x < 0), (1 - x, x <= 1), (0, True))
    p = 4*p1 + 2*p2
    assert integrate(
        piecewise_fold(p), (x, -oo, oo)) == integrate(2*x + 2, (x, 0, 1))
def test_piecewise_fold_piecewise_in_cond():
    """Folding when a Piecewise appears inside another's condition."""
    p1 = Piecewise((cos(x), x < 0), (0, True))
    p2 = Piecewise((0, Eq(p1, 0)), (p1 / Abs(p1), True))
    # NOTE(review): p3 is assigned but never asserted against below.
    p3 = piecewise_fold(p2)
    assert(p2.subs(x, -pi/2) == 0.0)
    assert(p2.subs(x, 1) == 0.0)
    assert(p2.subs(x, -pi/4) == 1.0)
    p4 = Piecewise((0, Eq(p1, 0)), (1,True))
    assert(piecewise_fold(p4) == Piecewise(
        (0, Or(And(Eq(cos(x), 0), x < 0), Not(x < 0))), (1, True)))
    r1 = 1 < Piecewise((1, x < 1), (3, True))
    assert(piecewise_fold(r1) == Not(x < 1))
    p5 = Piecewise((1, x < 0), (3, True))
    p6 = Piecewise((1, x < 1), (3, True))
    p7 = piecewise_fold(Piecewise((1, p5 < p6), (0, True)))
    # NOTE(review): this only asserts the truthiness of a freshly built
    # Piecewise; it never compares p7 — presumably it was meant to be
    # `assert p7 == Piecewise(...)`.  Confirm before changing.
    assert(Piecewise((1, And(Not(x < 1), x < 0)), (0, True)))
@XFAIL
def test_piecewise_fold_piecewise_in_cond_2():
    """Expected to fail: folding 1/p1 with p1 in the condition."""
    p1 = Piecewise((cos(x), x < 0), (0, True))
    p2 = Piecewise((0, Eq(p1, 0)), (1 / p1, True))
    p3 = Piecewise((0, Or(And(Eq(cos(x), 0), x < 0), Not(x < 0))),
        (1 / cos(x), True))
    assert(piecewise_fold(p2) == p3)
def test_piecewise_fold_expand():
    """Order of expand() and piecewise_fold() changes the resulting form."""
    p1 = Piecewise((1, Interval(0, 1, False, True).contains(x)), (0, True))
    p2 = piecewise_fold(expand((1 - x)*p1))
    assert p2 == Piecewise((1 - x, Interval(0, 1, False, True).contains(x)),
        (Piecewise((-x, Interval(0, 1, False, True).contains(x)), (0, True)), True))
    p2 = expand(piecewise_fold((1 - x)*p1))
    assert p2 == Piecewise(
        (1 - x, Interval(0, 1, False, True).contains(x)), (0, True))
def test_piecewise_duplicate():
    """Rebuilding a Piecewise from its own args is an identity."""
    p = Piecewise((x, x < -10), (x**2, x <= -1), (x, 1 < x))
    assert p == Piecewise(*p.args)
def test_doit():
    """doit() evaluates unevaluated sub-expressions; deep=False does not."""
    p1 = Piecewise((x, x < 1), (x**2, -1 <= x), (x, 3 < x))
    p2 = Piecewise((x, x < 1), (Integral(2 * x), -1 <= x), (x, 3 < x))
    assert p2.doit() == p1
    assert p2.doit(deep=False) == p2
def test_piecewise_interval():
    """subs, diff and integrate with an Interval.contains condition."""
    p1 = Piecewise((x, Interval(0, 1).contains(x)), (0, True))
    assert p1.subs(x, -0.5) == 0
    assert p1.subs(x, 0.5) == 0.5
    assert p1.diff(x) == Piecewise((1, Interval(0, 1).contains(x)), (0, True))
    assert integrate(
        p1, x) == Piecewise((x**2/2, Interval(0, 1).contains(x)), (0, True))
def test_piecewise_collapse():
    """Nested Piecewise with matching conditions collapses; a nested
    Piecewise under a True condition is preserved as-is."""
    p1 = Piecewise((x, x < 0), (x**2, x > 1))
    p2 = Piecewise((p1, x < 0), (p1, x > 1))
    assert p2 == Piecewise((x, x < 0), (x**2, 1 < x))
    p1 = Piecewise((Piecewise((x, x < 0), (1, True)), True))
    assert p1 == Piecewise((Piecewise((x, x < 0), (1, True)), True))
def test_piecewise_lambdify():
    """A lambdified Piecewise evaluates each branch numerically."""
    p = Piecewise(
        (x**2, x < 0),
        (x, Interval(0, 1, False, True).contains(x)),
        (2 - x, x >= 1),
        (0, True)
    )
    f = lambdify(x, p)
    assert f(-2.0) == 4.0
    assert f(0.0) == 0.0
    assert f(0.5) == 0.5
    assert f(2.0) == 0.0
def test_piecewise_series():
    """nseries() expands each branch around x = 0."""
    from sympy import sin, cos, O
    p1 = Piecewise((sin(x), x < 0), (cos(x), x > 0))
    p2 = Piecewise((x + O(x**2), x < 0), (1 + O(x**2), x > 0))
    assert p1.nseries(x, n=2) == p2
def test_piecewise_as_leading_term():
    """as_leading_term(x) is taken from the branch active near x = 0."""
    p1 = Piecewise((1/x, x > 1), (0, True))
    p2 = Piecewise((x, x > 1), (0, True))
    p3 = Piecewise((1/x, x > 1), (x, True))
    p4 = Piecewise((x, x > 1), (1/x, True))
    p5 = Piecewise((1/x, x > 1), (x, True))
    p6 = Piecewise((1/x, x < 1), (x, True))
    p7 = Piecewise((x, x < 1), (1/x, True))
    p8 = Piecewise((x, x > 1), (1/x, True))
    assert p1.as_leading_term(x) == 0
    assert p2.as_leading_term(x) == 0
    assert p3.as_leading_term(x) == x
    assert p4.as_leading_term(x) == 1/x
    assert p5.as_leading_term(x) == x
    assert p6.as_leading_term(x) == 1/x
    assert p7.as_leading_term(x) == x
    assert p8.as_leading_term(x) == 1/x
def test_piecewise_complex():
    """conjugate, is_real/is_imaginary and as_real_imag on Piecewise."""
    p1 = Piecewise((2, x < 0), (1, 0 <= x))
    p2 = Piecewise((2*I, x < 0), (I, 0 <= x))
    p3 = Piecewise((I*x, x > 1), (1 + I, True))
    p4 = Piecewise((-I*conjugate(x), x > 1), (1 - I, True))
    assert conjugate(p1) == p1
    assert conjugate(p2) == piecewise_fold(-p2)
    assert conjugate(p3) == p4
    assert p1.is_imaginary is False
    assert p1.is_real is True
    assert p2.is_imaginary is True
    assert p2.is_real is False
    # p3 mixes real and imaginary branches, so both queries are unknown.
    assert p3.is_imaginary is None
    assert p3.is_real is None
    assert p1.as_real_imag() == (p1, 0)
    assert p2.as_real_imag() == (0, -I*p2)
def test_conjugate_transpose():
    """adjoint/conjugate/transpose distribute over non-commutative branches."""
    A, B = symbols("A B", commutative=False)
    p = Piecewise((A*B**2, x > 0), (A**2*B, True))
    assert p.adjoint() == \
        Piecewise((adjoint(A*B**2), x > 0), (adjoint(A**2*B), True))
    assert p.conjugate() == \
        Piecewise((conjugate(A*B**2), x > 0), (conjugate(A**2*B), True))
    assert p.transpose() == \
        Piecewise((transpose(A*B**2), x > 0), (transpose(A**2*B), True))
def test_piecewise_evaluate():
    """evaluate=False suppresses the trivial (x, True) -> x collapse."""
    assert Piecewise((x, True)) == x
    assert Piecewise((x, True), evaluate=True) == x
    p = Piecewise((x, True), evaluate=False)
    assert p != x
    assert p.is_Piecewise
    assert all(isinstance(i, Basic) for i in p.args)
|
|
from __future__ import absolute_import, division
import os
import traceback
import sys
import math
import urwid
from netlib import odict
from netlib.http import CONTENT_MISSING, Headers
from . import common, grideditor, signals, searchable, tabs
from . import flowdetailview
from .. import utils, controller, contentviews
from ..models import HTTPRequest, HTTPResponse, decoded
from ..exceptions import ContentViewException
class SearchError(Exception):
    """Raised when an in-view text search cannot be completed."""
def _mkhelp():
    """Build the formatted key-binding help text for the flow view.

    Entries with a None key render as indented continuation lines (the
    per-mode options under 'm').
    """
    text = []
    keys = [
        ("A", "accept all intercepted flows"),
        ("a", "accept this intercepted flow"),
        ("b", "save request/response body"),
        ("D", "duplicate flow"),
        ("d", "delete flow"),
        ("E", "export"),
        ("e", "edit request/response"),
        ("f", "load full body data"),
        ("m", "change body display mode for this entity"),
        (None,
            common.highlight_key("automatic", "a") +
            [("text", ": automatic detection")]
         ),
        (None,
            common.highlight_key("hex", "e") +
            [("text", ": Hex")]
         ),
        (None,
            common.highlight_key("html", "h") +
            [("text", ": HTML")]
         ),
        (None,
            common.highlight_key("image", "i") +
            [("text", ": Image")]
         ),
        (None,
            common.highlight_key("javascript", "j") +
            [("text", ": JavaScript")]
         ),
        (None,
            common.highlight_key("json", "s") +
            [("text", ": JSON")]
         ),
        (None,
            common.highlight_key("urlencoded", "u") +
            [("text", ": URL-encoded data")]
         ),
        (None,
            common.highlight_key("raw", "r") +
            [("text", ": raw data")]
         ),
        (None,
            common.highlight_key("xml", "x") +
            [("text", ": XML")]
         ),
        ("M", "change default body display mode"),
        ("p", "previous flow"),
        ("P", "copy response(content/headers) to clipboard"),
        ("r", "replay request"),
        ("V", "revert changes to request"),
        ("v", "view body in external viewer"),
        ("w", "save all flows matching current limit"),
        ("W", "save this flow"),
        ("x", "delete body"),
        ("z", "encode/decode a request/response"),
        ("tab", "next tab"),
        ("h, l", "previous tab, next tab"),
        ("space", "next flow"),
        ("|", "run script on this flow"),
        ("/", "search (case sensitive)"),
        ("n", "repeat search forward"),
        ("N", "repeat search backwards"),
    ]
    text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
    return text
# Help text shown for this view, built once at import time.
help_context = _mkhelp()
# Footer markup shown at the bottom of the flow view.
footer = [
    ('heading_key', "?"), ":help ",
    ('heading_key', "q"), ":back ",
]
class FlowViewHeader(urwid.WidgetWrap):
    """Single-line header widget summarising the flow being viewed.

    Subscribes to flow_change and re-renders itself whenever the flow it
    displays is updated.
    """

    def __init__(self, master, f):
        self.master, self.flow = master, f
        self._render(f)
        signals.flow_change.connect(self.sig_flow_change)

    def _render(self, flow):
        # Single place for the formatting options; previously this call
        # was duplicated verbatim in __init__ and sig_flow_change.
        self._w = common.format_flow(
            flow,
            False,
            extended=True,
            padding=0,
            hostheader=self.master.showhost
        )

    def sig_flow_change(self, sender, flow):
        # Ignore updates about flows other than the one we display.
        if flow == self.flow:
            self._render(flow)
# Module-wide cache for rendered content views; entries are keyed on
# (headers, content) in content_view() so edits invalidate them.
cache = utils.LRUCache(200)
# Tab indices within FlowView.
TAB_REQ = 0
TAB_RESP = 1
class FlowView(tabs.Tabs):
    """Tabbed detail view for a single flow: request, response, details."""
    highlight_color = "focusfield"
    def __init__(self, master, state, flow, tab_offset):
        """Build the three tabs and subscribe to flow change signals."""
        self.master, self.state, self.flow = master, state, flow
        tabs.Tabs.__init__(self,
            [
                (self.tab_request, self.view_request),
                (self.tab_response, self.view_response),
                (self.tab_details, self.view_details),
            ],
            tab_offset
        )
        self.show()
        # Remembered so external-viewer / save actions can reuse it.
        self.last_displayed_body = None
        signals.flow_change.connect(self.sig_flow_change)
def tab_request(self):
if self.flow.intercepted and not self.flow.reply.acked and not self.flow.response:
return "Request intercepted"
else:
return "Request"
def tab_response(self):
if self.flow.intercepted and not self.flow.reply.acked and self.flow.response:
return "Response intercepted"
else:
return "Response"
def tab_details(self):
return "Detail"
def view_request(self):
return self.conn_text(self.flow.request)
def view_response(self):
return self.conn_text(self.flow.response)
def view_details(self):
return flowdetailview.flowdetails(self.state, self.flow)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self.show()
def content_view(self, viewmode, message):
if message.content == CONTENT_MISSING:
msg, body = "", [urwid.Text([("error", "[content missing]")])]
return msg, body
else:
full = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "fullcontents"),
False
)
if full:
limit = sys.maxsize
else:
limit = contentviews.VIEW_CUTOFF
return cache.get(
self._get_content_view,
viewmode,
message,
limit,
(bytes(message.headers), message.content) # Cache invalidation
)
    def _get_content_view(self, viewmode, message, max_lines, _):
        """Render the message body with *viewmode*, truncated to a budget
        of max_lines * 80 characters; falls back to the Raw view when the
        chosen content view raises.

        Returns (description, [urwid.Text]); the unused final parameter is
        the cache-invalidation key from content_view().
        """
        try:
            description, lines = contentviews.get_content_view(
                viewmode, message.content, headers=message.headers
            )
        except ContentViewException:
            s = "Content viewer failed: \n" + traceback.format_exc()
            signals.add_event(s, "error")
            description, lines = contentviews.get_content_view(
                contentviews.get("Raw"), message.content, headers=message.headers
            )
            description = description.replace("Raw", "Couldn't parse: falling back to Raw")
        # Give hint that you have to tab for the response.
        if description == "No content" and isinstance(message, HTTPRequest):
            description = "No request content (press tab to view response)"
        # If the users has a wide terminal, he gets fewer lines; this should not be an issue.
        chars_per_line = 80
        max_chars = max_lines * chars_per_line
        total_chars = 0
        text_objects = []
        for line in lines:
            txt = []
            for (style, text) in line:
                # Trim the final segment so the budget is never exceeded.
                if total_chars + len(text) > max_chars:
                    text = text[:max_chars - total_chars]
                txt.append((style, text))
                total_chars += len(text)
                if total_chars == max_chars:
                    break
            # round up to the next line.
            total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)
            text_objects.append(urwid.Text(txt))
            if total_chars == max_chars:
                text_objects.append(urwid.Text([
                    ("highlight", "Stopped displaying data after %d lines. Press " % max_lines),
                    ("key", "f"),
                    ("highlight", " to load all data.")
                ]))
                break
        return description, text_objects
def viewmode_get(self):
override = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "prettyview")
)
return self.state.default_body_view if override is None else override
    def conn_text(self, conn):
        """Build the searchable widget list for a request/response.

        Renders headers, a title row naming the current view mode, and
        the (possibly truncated) body; a placeholder is shown when conn
        is None (no response yet).
        """
        if conn:
            txt = common.format_keyvals(
                [(h + ":", v) for (h, v) in conn.headers.fields],
                key = "header",
                val = "text"
            )
            viewmode = self.viewmode_get()
            msg, body = self.content_view(viewmode, conn)
            cols = [
                urwid.Text(
                    [
                        ("heading", msg),
                    ]
                ),
                urwid.Text(
                    [
                        " ",
                        ('heading', "["),
                        ('heading_key', "m"),
                        ('heading', (":%s]" % viewmode.name)),
                    ],
                    align="right"
                )
            ]
            title = urwid.AttrWrap(urwid.Columns(cols), "heading")
            txt.append(title)
            txt.extend(body)
        else:
            txt = [
                urwid.Text(""),
                urwid.Text(
                    [
                        ("highlight", "No response. Press "),
                        ("key", "e"),
                        ("highlight", " and edit any aspect to add one."),
                    ]
                )
            ]
        return searchable.Searchable(self.state, txt)
def set_method_raw(self, m):
if m:
self.flow.request.method = m
signals.flow_change.send(self, flow = self.flow)
def edit_method(self, m):
if m == "e":
signals.status_prompt.send(
prompt = "Method",
text = self.flow.request.method,
callback = self.set_method_raw
)
else:
for i in common.METHOD_OPTIONS:
if i[1] == m:
self.flow.request.method = i[0].upper()
signals.flow_change.send(self, flow = self.flow)
def set_url(self, url):
request = self.flow.request
try:
request.url = str(url)
except ValueError:
return "Invalid URL."
signals.flow_change.send(self, flow = self.flow)
    def set_resp_code(self, code):
        """Prompt callback: set the response status code and, when the code
        is known, the matching default reason phrase."""
        response = self.flow.response
        try:
            response.status_code = int(code)
        except ValueError:
            # Non-numeric input is silently ignored (no error message,
            # unlike set_url).
            return None
        # NOTE(review): BaseHTTPServer is Python 2 only; under Python 3 this
        # would need http.server / six.moves — confirm the supported runtime.
        import BaseHTTPServer
        if int(code) in BaseHTTPServer.BaseHTTPRequestHandler.responses:
            response.msg = BaseHTTPServer.BaseHTTPRequestHandler.responses[
                int(code)][0]
        signals.flow_change.send(self, flow = self.flow)
def set_resp_msg(self, msg):
response = self.flow.response
response.msg = msg
signals.flow_change.send(self, flow = self.flow)
    def set_headers(self, fields, conn):
        """Grid-editor callback: replace conn's headers wholesale."""
        conn.headers = Headers(fields)
        signals.flow_change.send(self, flow = self.flow)
    def set_query(self, lst, conn):
        """Grid-editor callback: replace conn's query-string parameters."""
        conn.set_query(odict.ODict(lst))
        signals.flow_change.send(self, flow = self.flow)
    def set_path_components(self, lst, conn):
        """Grid-editor callback: replace conn's URL path components."""
        conn.set_path_components(lst)
        signals.flow_change.send(self, flow = self.flow)
    def set_form(self, lst, conn):
        """Grid-editor callback: replace conn's URL-encoded form body."""
        conn.set_form_urlencoded(odict.ODict(lst))
        signals.flow_change.send(self, flow = self.flow)
def edit_form(self, conn):
self.master.view_grideditor(
grideditor.URLEncodedFormEditor(
self.master,
conn.get_form_urlencoded().lst,
self.set_form,
conn
)
)
def edit_form_confirm(self, key, conn):
if key == "y":
self.edit_form(conn)
def set_cookies(self, lst, conn):
od = odict.ODict(lst)
conn.set_cookies(od)
signals.flow_change.send(self, flow = self.flow)
    def set_setcookies(self, data, conn):
        """Grid-editor callback: replace the response's Set-Cookie data."""
        conn.set_cookies(data)
        signals.flow_change.send(self, flow = self.flow)
def edit(self, part):
if self.tab_offset == TAB_REQ:
message = self.flow.request
else:
if not self.flow.response:
self.flow.response = HTTPResponse(
self.flow.request.http_version,
200, "OK", Headers(), ""
)
self.flow.response.reply = controller.DummyReply()
message = self.flow.response
self.flow.backup()
if message == self.flow.request and part == "c":
self.master.view_grideditor(
grideditor.CookieEditor(
self.master,
message.get_cookies().lst,
self.set_cookies,
message
)
)
if message == self.flow.response and part == "c":
self.master.view_grideditor(
grideditor.SetCookieEditor(
self.master,
message.get_cookies(),
self.set_setcookies,
message
)
)
if part == "r":
with decoded(message):
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
c = self.master.spawn_editor(message.content or "")
message.content = c.rstrip("\n")
elif part == "f":
if not message.get_form_urlencoded() and message.content:
signals.status_prompt_onekey.send(
prompt = "Existing body is not a URL-encoded form. Clear and edit?",
keys = [
("yes", "y"),
("no", "n"),
],
callback = self.edit_form_confirm,
args = (message,)
)
else:
self.edit_form(message)
elif part == "h":
self.master.view_grideditor(
grideditor.HeaderEditor(
self.master,
message.headers.fields,
self.set_headers,
message
)
)
elif part == "p":
p = message.get_path_components()
self.master.view_grideditor(
grideditor.PathEditor(
self.master,
p,
self.set_path_components,
message
)
)
elif part == "q":
self.master.view_grideditor(
grideditor.QueryEditor(
self.master,
message.get_query().lst,
self.set_query, message
)
)
elif part == "u":
signals.status_prompt.send(
prompt = "URL",
text = message.url,
callback = self.set_url
)
elif part == "m":
signals.status_prompt_onekey.send(
prompt = "Method",
keys = common.METHOD_OPTIONS,
callback = self.edit_method
)
elif part == "o":
signals.status_prompt.send(
prompt = "Code",
text = str(message.status_code),
callback = self.set_resp_code
)
elif part == "m":
signals.status_prompt.send(
prompt = "Message",
text = message.msg,
callback = self.set_resp_msg
)
signals.flow_change.send(self, flow = self.flow)
def _view_nextprev_flow(self, np, flow):
try:
idx = self.state.view.index(flow)
except IndexError:
return
if np == "next":
new_flow, new_idx = self.state.get_next(idx)
else:
new_flow, new_idx = self.state.get_prev(idx)
if new_flow is None:
signals.status_message.send(message="No more flows!")
else:
signals.pop_view_state.send(self)
self.master.view_flow(new_flow, self.tab_offset)
def view_next_flow(self, flow):
return self._view_nextprev_flow("next", flow)
def view_prev_flow(self, flow):
return self._view_nextprev_flow("prev", flow)
def change_this_display_mode(self, t):
self.state.add_flow_setting(
self.flow,
(self.tab_offset, "prettyview"),
contentviews.get_by_shortcut(t)
)
signals.flow_change.send(self, flow = self.flow)
def delete_body(self, t):
if t == "m":
val = CONTENT_MISSING
else:
val = None
if self.tab_offset == TAB_REQ:
self.flow.request.content = val
else:
self.flow.response.content = val
signals.flow_change.send(self, flow = self.flow)
    def keypress(self, size, key):
        """
        Handle a keypress on the flow detail view.

        Delegates to the superclass first, then dispatches on the
        (shortcut-translated) key. Returns the key if unhandled so the
        parent widget can process it, else None.
        """
        key = super(self.__class__, self).keypress(size, key)
        if key == " ":
            self.view_next_flow(self.flow)
            return
        key = common.shortcuts(key)
        # conn is the message shown on the active tab, or None when the
        # focused tab is neither request nor response.
        if self.tab_offset == TAB_REQ:
            conn = self.flow.request
        elif self.tab_offset == TAB_RESP:
            conn = self.flow.response
        else:
            conn = None
        if key in ("up", "down", "page up", "page down"):
            # Why doesn't this just work??
            self._w.keypress(size, key)
        elif key == "a":
            self.flow.accept_intercept(self.master)
            signals.flow_change.send(self, flow = self.flow)
        elif key == "A":
            self.master.accept_all()
            signals.flow_change.send(self, flow = self.flow)
        elif key == "d":
            # Delete: pick the next view target before killing the flow.
            if self.state.flow_count() == 1:
                self.master.view_flowlist()
            elif self.state.view.index(self.flow) == len(self.state.view) - 1:
                self.view_prev_flow(self.flow)
            else:
                self.view_next_flow(self.flow)
            f = self.flow
            f.kill(self.master)
            self.state.delete_flow(f)
        elif key == "D":
            f = self.master.duplicate_flow(self.flow)
            self.master.view_flow(f)
            signals.status_message.send(message="Duplicated.")
        elif key == "p":
            self.view_prev_flow(self.flow)
        elif key == "r":
            r = self.master.replay_request(self.flow)
            if r:
                signals.status_message.send(message=r)
            signals.flow_change.send(self, flow = self.flow)
        elif key == "V":
            if not self.flow.modified():
                signals.status_message.send(message="Flow not modified.")
                return
            self.state.revert(self.flow)
            signals.flow_change.send(self, flow = self.flow)
            signals.status_message.send(message="Reverted.")
        elif key == "W":
            signals.status_prompt_path.send(
                prompt = "Save this flow",
                callback = self.master.save_one_flow,
                args = (self.flow,)
            )
        elif key == "E":
            signals.status_prompt_onekey.send(
                self,
                prompt = "Export",
                keys = (
                    ("as curl command", "c"),
                    ("as python code", "p"),
                    ("as raw request", "r"),
                ),
                callback = common.export_prompt,
                args = (self.flow,)
            )
        elif key == "|":
            signals.status_prompt_path.send(
                prompt = "Send flow to script",
                callback = self.master.run_script_once,
                args = (self.flow,)
            )
        # The keys below operate on the focused message; without one,
        # tell the user to tab to a request/response first.
        if not conn and key in set(list("befgmxvz")):
            signals.status_message.send(
                message = "Tab to the request or response",
                expire = 1
            )
        elif conn:
            if key == "b":
                if self.tab_offset == TAB_REQ:
                    common.ask_save_body(
                        "q", self.master, self.state, self.flow
                    )
                else:
                    common.ask_save_body(
                        "s", self.master, self.state, self.flow
                    )
            elif key == "e":
                if self.tab_offset == TAB_REQ:
                    signals.status_prompt_onekey.send(
                        prompt = "Edit request",
                        keys = (
                            ("cookies", "c"),
                            ("query", "q"),
                            ("path", "p"),
                            ("url", "u"),
                            ("header", "h"),
                            ("form", "f"),
                            ("raw body", "r"),
                            ("method", "m"),
                        ),
                        callback = self.edit
                    )
                else:
                    signals.status_prompt_onekey.send(
                        prompt = "Edit response",
                        keys = (
                            ("cookies", "c"),
                            ("code", "o"),
                            ("message", "m"),
                            ("header", "h"),
                            ("raw body", "r"),
                        ),
                        callback = self.edit
                    )
                # Consume the key so the parent widget doesn't also act on it.
                key = None
            elif key == "f":
                signals.status_message.send(message="Loading all body data...")
                self.state.add_flow_setting(
                    self.flow,
                    (self.tab_offset, "fullcontents"),
                    True
                )
                signals.flow_change.send(self, flow = self.flow)
                signals.status_message.send(message="")
            elif key == "P":
                if self.tab_offset == TAB_REQ:
                    scope = "q"
                else:
                    scope = "s"
                common.ask_copy_part(scope, self.flow, self.master, self.state)
            elif key == "m":
                p = list(contentviews.view_prompts)
                p.insert(0, ("Clear", "C"))
                signals.status_prompt_onekey.send(
                    self,
                    prompt = "Display mode",
                    keys = p,
                    callback = self.change_this_display_mode
                )
                key = None
            elif key == "x":
                signals.status_prompt_onekey.send(
                    prompt = "Delete body",
                    keys = (
                        ("completely", "c"),
                        ("mark as missing", "m"),
                    ),
                    callback = self.delete_body
                )
                key = None
            elif key == "v":
                # View body in an external viewer, if one is configured.
                if conn.content:
                    t = conn.headers.get("content-type")
                    if "EDITOR" in os.environ or "PAGER" in os.environ:
                        self.master.spawn_external_viewer(conn.content, t)
                    else:
                        signals.status_message.send(
                            message = "Error! Set $EDITOR or $PAGER."
                        )
            elif key == "z":
                # Toggle encoding: decode if encoded, else prompt for one.
                self.flow.backup()
                e = conn.headers.get("content-encoding", "identity")
                if e != "identity":
                    if not conn.decode():
                        signals.status_message.send(
                            message = "Could not decode - invalid data?"
                        )
                else:
                    signals.status_prompt_onekey.send(
                        prompt = "Select encoding: ",
                        keys = (
                            ("gzip", "z"),
                            ("deflate", "d"),
                        ),
                        callback = self.encode_callback,
                        args = (conn,)
                    )
                signals.flow_change.send(self, flow = self.flow)
        return key
def encode_callback(self, key, conn):
encoding_map = {
"z": "gzip",
"d": "deflate",
}
conn.encode(encoding_map[key])
signals.flow_change.send(self, flow = self.flow)
|
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_Testcase/IOST_WSetupTestcase.py
# Date : Sep 27, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: hpp_IOStressTest is released under the MIT License; a copy of the license can be found in the LICENSE file.
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from pprint import pprint
from IOST_Basic import *
from IOST_Config import *
from IOST_Terminal import *
import gtk
import gtk.glade
import gobject
import pango
import vte
#======================================================================
# Derive this module's debug switches from the global IOST_DBG_EN flag
# (expected to come from the star imports above). If it is not defined,
# fall back to debugging disabled.
try:
    IOST_DBG_EN
    if IOST_DBG_EN:
        IOST_WSetupTestcase_DebugEnable = 1
        IOST_WSetupTestcase_DebugLevel = IOST_DBG_L06
    else:
        IOST_WSetupTestcase_DebugEnable = 0
        IOST_WSetupTestcase_DebugLevel = IOST_DBG_L01
except NameError:
    # Only an undefined IOST_DBG_EN is expected here; the original bare
    # "except:" would also have hidden unrelated errors.
    IOST_DBG_EN = False
    IOST_WSetupTestcase_DebugEnable = 0
    IOST_WSetupTestcase_DebugLevel = IOST_DBG_L01
#======================================================================
class IOST_WSetupTestcase():
"""
"""
def __init__(self, glade_filename, window_name, object_name, builder=None ):
""
self.IOST_WSetupTestcas_glade_filename = glade_filename
self.IOST_WSetupTestcase_window = window_name
self.IOST_WSetupTestcase_object = object_name
if not builder:
self.WRun_WSetupTestcase_Builder = gtk.Builder()
self.WRun_WSetupTestcase_Builder.add_from_file(glade_filename)
self.WRun_WSetupTestcase_Builder.connect_signals(self)
else:
self.WRun_WSetupTestcase_Builder = builder
# self.IOST_Objs[window_name][window_name+object_name] = self.WRun_WSetupTestcase_Builder.get_object(window_name+object_name)
#----------------------------------------------------------------------
def WSetupTestcase_Get_Objs(self, window_name):
""
# self.CreateObjs(self.IOST_Objs[window_name], self.WRun_WSetupTestcase_Builder, 2)
self.CreateObjsDictFromDict(window_name, self.IOST_Objs[window_name], self.WRun_WSetupTestcase_Builder, 0)
if IOST_WSetupTestcase_DebugEnable and 0:
pprint(self.IOST_Objs[window_name])
#----------------------------------------------------------------------
def WSetupTestcase_Clear(self, window_name):
"""
"""
self.IOST_Objs[window_name]["_SelectAll_CB"].set_active(False)
self.IOST_Objs[window_name]["_ResetAll_CB"].set_active(False)
for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"] ):
# print "At horizontal ", i
# print window_name+"_"+str(i+1)+"_Enable_CB"
self.IOST_Objs[window_name]["_"+str(i+1)+"_Enable_CB"].set_sensitive(False)
self.IOST_Objs[window_name]["_"+str(i+1)+"_Enable_CB"].set_active(False)
self.IOST_Objs[window_name]["_"+str(i+1)+"_Cmd_TE"].set_text("")
self.IOST_Objs[window_name]["_"+str(i+1)+"_Description_TE"].set_text("")
self.IOST_Objs[window_name]["_"+str(i+1)+"_RunTimes_TE"].set_text("")
self.IOST_Objs[window_name]["_"+str(i+1)+"_ResetBoard_CB"].set_active(False)
self.IOST_Objs[window_name]["_"+str(i+1)+"_ResetBoard_CB"].set_sensitive(False)
#----------------------------------------------------------------------
def WSetupTestcase_SetCallback(self, window_name, object_name, number):
"""
"""
# for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
self.IOST_Objs[window_name]["_"+str(number)+"_Enable_CB"].connect('toggled', \
self.WSetupTestcase_Enable_CB_Callback, \
self.IOST_Objs[window_name]["_"+str(number)+"_Enable_CB"], \
object_name, number)
self.IOST_Objs[window_name]["_"+str(number)+"_Cmd_TE"].connect('focus-out-event', \
self.WSetupTestcase_CMD_TE_Callback, \
self.IOST_Objs[window_name]["_"+str(number)+"_Cmd_TE"], \
object_name, number, self.IOST_Objs[window_name]["_"+str(number)+"_Enable_CB"], \
self.IOST_Objs[window_name]["_"+str(number)+"_ResetBoard_CB"])
self.IOST_Objs[window_name]["_"+str(number)+"_Description_TE"].connect('focus-out-event', \
self.WSetupTestcase_Description_TE_Callback, \
self.IOST_Objs[window_name]["_"+str(number)+"_Description_TE"], \
object_name, number, self.IOST_Objs[window_name]["_"+str(number)+"_Enable_CB"], \
self.IOST_Objs[window_name]["_"+str(number)+"_ResetBoard_CB"])
self.IOST_Objs[window_name]["_"+str(number)+"_RunTimes_TE"].connect('focus-out-event', \
self.WSetupTestcase_RunTimes_TE_Callback, \
self.IOST_Objs[window_name]["_"+str(number)+"_RunTimes_TE"], \
object_name, number, self.IOST_Objs[window_name]["_"+str(number)+"_Enable_CB"], \
self.IOST_Objs[window_name]["_"+str(number)+"_ResetBoard_CB"])
self.IOST_Objs[window_name]["_"+str(number)+"_ResetBoard_CB"].connect('toggled', \
self.WSetupTestcase_ResetBoard_CB_Callback,
self.IOST_Objs[window_name]["_"+str(number)+"_ResetBoard_CB"],
object_name, number)
#----------------------------------------------------------------------
    def WSetupTestcase_Enable_CB_Callback(self, object, object_active, data=None, number=None):
        """Record the Enable checkbox state of testcase *number* in *data*.

        *data* is the per-IP testcase list; *object_active* is the checkbox
        widget carrying the new state.
        """
        if number > self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]:
            # Row beyond the known testcases: extend the data list first.
            # NOTE(review): this appends a reference to the shared template
            # dict, not a copy — rows created this way may alias each other;
            # verify against IOST_Data["IP_N"] usage.
            data.append( self.IOST_Data["IP_N"][1] )
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] +=1
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_E_Testcase"] =0
        if IOST_WSetupTestcase_DebugEnable:
            iost_print(IOST_WSetupTestcase_DebugLevel, None, "0-0-0-0-0-0-0-0-0")
            iost_print(IOST_WSetupTestcase_DebugLevel, number, "WSetupTestcase_Enable_CB -> number")
            iost_print(IOST_WSetupTestcase_DebugLevel, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"], "WSetupTestcase_Enable_CB -> numbTestcaseNumber")
            iost_print(IOST_WSetupTestcase_DebugLevel, number, "WSetupTestcase_Enable_CB -> TestcaseList")
        # Persist the checkbox state as the string 'Enable'/'Disable'.
        Res = object_active.get_active()
        if Res:
            data[int(number)]["Active"] = 'Enable'
        else:
            data[int(number)]["Active"] = 'Disable'
#----------------------------------------------------------------------
    def WSetupTestcase_CMD_TE_Callback(self, object, event, object_active, data=None, number=None, set_object_1=None, set_object_2=None):
        """Handle focus-out of the Command text entry of testcase *number*.

        Updates the per-IP testcase list *data*, keeps the Enable
        (*set_object_1*) and ResetBoard (*set_object_2*) checkboxes in
        sync, and grows the visible row list when the last row is filled.
        """
        # Ensure the per-row bookkeeping flags exist (0 = field empty).
        #______________________________________________________________________
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 0
        #______________________________________________________________________
        if IOST_WSetupTestcase_DebugEnable:
            iost_print(IOST_WSetupTestcase_DebugLevel, None, "1-1-1-1-1-1-1-1-1" )
            iost_print(IOST_WSetupTestcase_DebugLevel, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"], "WSetupTestcase_CMD_TE->TestcaseNumber")
        # If this row is beyond the known testcases, pad *data* up to it.
        # test_segment remembers how many rows were added so they can be
        # rolled back below when all three fields end up empty.
        test_segment = 0
        if number > self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]:
            test_segment = number - self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
            for i in range(0, test_segment):
                # NOTE(review): appends a reference to the shared template
                # dict (no copy) — padded rows may alias each other; confirm.
                data.append( self.IOST_Data["IP_N"][1] )
                self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] +=1
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 0
        #______________________________________________________________________
        if IOST_WSetupTestcase_DebugEnable:
            print "number = ", number
            iost_print(IOST_WSetupTestcase_DebugLevel, number, "WSetupTestcase_CMD_TE->number")
            iost_print(IOST_WSetupTestcase_DebugLevel, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"], "WSetupTestcase_CMD_TE->TestcaseNumber")
            iost_print(IOST_WSetupTestcase_DebugLevel, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"], "WSetupTestcase_CMD_TE->TestcaseList")
        #______________________________________________________________________
        # Store the command text and mark the CMD flag accordingly.
        text = object_active.get_text()
        if text and text.strip(): # string NOT empty"
            set_object_1.set_sensitive(True)
            set_object_2.set_sensitive(True)
            data[number]["Testcase"] = text
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 1
        #______________________________________________________________________
        else: # string empty"
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 0
        #______________________________________________________________________
        # A row is auto-enabled once both command and run-times are filled.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 1) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 1)):
            print "x1"
            set_object_1.set_active(True)
            set_object_1.set_sensitive(True)
            data[number]["Active"] = 'Enable'
        elif ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) or
              (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            data[number]["Active"] = 'Disable'
        #______________________________________________________________________
        # All three fields empty again: disable the row and undo the padding
        # rows appended above.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            set_object_1.set_active(False)
            set_object_1.set_sensitive(False)
            set_object_2.set_active(False)
            set_object_2.set_sensitive(False)
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] -= test_segment
            for i in range(test_segment, 0, -1):
                del data[self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]+i]
            if IOST_WSetupTestcase_DebugEnable:
                print "-------------------------------------------"
                print "testcase number = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
                print "len data = ", len(data)
                print "_CMD_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
                print "_DES_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
                print "_RT_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
                print "------------------debug-------------------------"
                print number
                print self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
        # Editing the last visible row: append a fresh row to the window and
        # scroll the list to the bottom.
        if number == self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]:
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"] +=1
            self.WSetupTestcase_CreateTestListMore(self.IOST_Objs[self.IOST_WSetupTestcase_window], self.IOST_Objs["SetupTestcaseTemplate"], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            self.WSetupTestcase_SetCallback(self.IOST_WSetupTestcase_window, self.IOST_Data[self.IOST_WSetupTestcase_ip_name], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            # adjustment = yourScrolledWindow.get_vadjustment()
            # adjustment.set_value(adjustment.get_upper())
            adjustment = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].get_vadjustment()
            adjustment.set_value(adjustment.get_upper())
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_vadjustment(adjustment)
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_placement(gtk.CORNER_BOTTOM_LEFT)
            self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].show_all()
        if IOST_WSetupTestcase_DebugEnable:
            print "1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1-1"
#----------------------------------------------------------------------
    def WSetupTestcase_Description_TE_Callback(self, object, event, object_active, data=None, number=None, set_object_1=None, set_object_2=None):
        """Handle focus-out of the Description text entry of testcase *number*.

        Mirrors WSetupTestcase_CMD_TE_Callback but maintains the DES flag;
        *set_object_1*/*set_object_2* are the row's Enable and ResetBoard
        checkboxes.
        """
        # Ensure the per-row bookkeeping flags exist (0 = field empty).
        #______________________________________________________________________
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 0
        #______________________________________________________________________
        # Pad *data* when this row lies beyond the known testcases;
        # test_segment is used below to roll the padding back.
        test_segment = 0
        if number > self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]:
            test_segment = number - self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
            for i in range(0, test_segment):
                # NOTE(review): appends a shared template dict reference, not
                # a copy — padded rows may alias each other; confirm.
                data.append( self.IOST_Data["IP_N"][1] )
                self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] +=1
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 0
        #______________________________________________________________________
        if IOST_WSetupTestcase_DebugEnable:
            print "2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2"
            print "number = ", number
            print "testcase number = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
            print "testcase list = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
        #______________________________________________________________________
        # Store the description and mark the DES flag accordingly.
        text = object_active.get_text()
        if text and text.strip(): # string NOT empty"
            set_object_1.set_sensitive(True)
            set_object_2.set_sensitive(True)
            data[number]["Description"] = text
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 1
        #______________________________________________________________________
        else: # string empty"
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 0
        #______________________________________________________________________
        # A row is auto-enabled once both command and run-times are filled.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 1) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 1)):
            print "x2"
            set_object_1.set_active(True)
            set_object_1.set_sensitive(True)
            data[number]["Active"] = 'Enable'
        elif ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) or
              (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            data[number]["Active"] = 'Disable'
        #______________________________________________________________________
        # All three fields empty again: disable the row and undo the padding.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            set_object_1.set_active(False)
            set_object_1.set_sensitive(False)
            set_object_2.set_active(False)
            set_object_2.set_sensitive(False)
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] -= test_segment
            for i in range(test_segment, 0, -1):
                del data[self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]+i]
            if IOST_WSetupTestcase_DebugEnable:
                print "-------------------------------------------"
                print "testcase number = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
                print "len data = ", len(data)
                print "_CMD_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
                print "_DES_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
                print "_RT_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
                print "2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2-2"
        # Editing the last visible row: append a fresh row and scroll down.
        if number == self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]:
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"] +=1
            self.WSetupTestcase_CreateTestListMore(self.IOST_Objs[self.IOST_WSetupTestcase_window], self.IOST_Objs["SetupTestcaseTemplate"], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            self.WSetupTestcase_SetCallback(self.IOST_WSetupTestcase_window, self.IOST_Data[self.IOST_WSetupTestcase_ip_name], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            adjustment = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].get_vadjustment()
            adjustment.set_value(adjustment.get_upper())
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_vadjustment(adjustment)
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_placement(gtk.CORNER_BOTTOM_LEFT)
            self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].show_all()
#----------------------------------------------------------------------
    def WSetupTestcase_RunTimes_TE_Callback(self, object, event, object_active, data=None, number=None, set_object_1=None, set_object_2=None):
        """Handle focus-out of the RunTimes text entry of testcase *number*.

        Mirrors WSetupTestcase_CMD_TE_Callback but maintains the RT flag
        and stores the value as an int.
        """
        # Ensure the per-row bookkeeping flags exist (0 = field empty).
        #______________________________________________________________________
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] = 0
        try:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
        except KeyError:
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 0
        #______________________________________________________________________
        # Pad *data* when this row lies beyond the known testcases;
        # test_segment is used below to roll the padding back.
        test_segment = 0
        if number > self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]:
            test_segment = number - self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
            for i in range(0, test_segment):
                # NOTE(review): appends a shared template dict reference, not
                # a copy — padded rows may alias each other; confirm.
                data.append( self.IOST_Data["IP_N"][1] )
                self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] +=1
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 0
        if IOST_WSetupTestcase_DebugEnable:
            print "3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3"
            print "number = ", number
            print "testcase number = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
            print "testcase list = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
        text = object_active.get_text()
        #______________________________________________________________________
        if text and text.strip():
            set_object_1.set_sensitive(True)
            set_object_2.set_sensitive(True)
            # NOTE(review): int(text) raises ValueError on non-numeric input —
            # there is no guard here; verify the entry is numeric upstream.
            data[number]["RunTimes"] = int(text)
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 1
        #______________________________________________________________________
        else: # string empty"
            self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] = 0
        #______________________________________________________________________
        # A row is auto-enabled once both command and run-times are filled.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 1) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 1)):
            print "x3"
            set_object_1.set_active(True)
            set_object_1.set_sensitive(True)
            data[number]["Active"] = 'Enable'
        elif ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) or
              (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            data[number]["Active"] = 'Disable'
        #______________________________________________________________________
        # All three fields empty again: disable the row and undo the padding.
        if ((self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"] == 0) and
            (self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"] == 0)):
            set_object_1.set_active(False)
            set_object_1.set_sensitive(False)
            set_object_2.set_active(False)
            set_object_2.set_sensitive(False)
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] -=test_segment
            for i in range(test_segment, 0, -1):
                del data[self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]+i]
            if IOST_WSetupTestcase_DebugEnable:
                print "-------------------------------------------"
                print "testcase number = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
                print "len data = ", len(data)
                print "_CMD_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_CMD_Testcase"]
                print "_DES_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_DES_Testcase"]
                print "_RT_Testcase = ", self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RT_Testcase"]
                print "------------------debug-------------------------"
                print number
                print self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
        # Editing the last visible row: append a fresh row, focus its command
        # entry and scroll to the bottom.
        if number == self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]:
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"] +=1
            self.WSetupTestcase_CreateTestListMore(self.IOST_Objs[self.IOST_WSetupTestcase_window], self.IOST_Objs["SetupTestcaseTemplate"], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            self.WSetupTestcase_SetCallback(self.IOST_WSetupTestcase_window, self.IOST_Data[self.IOST_WSetupTestcase_ip_name], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].set_focus(self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(number+1)+"_Cmd_TE"])
            adjustment = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].get_vadjustment()
            adjustment.set_value(adjustment.get_upper())
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_vadjustment(adjustment)
            self.IOST_Objs[self.IOST_WSetupTestcase_window]["_TestcaseList_SW"].set_placement(gtk.CORNER_BOTTOM_LEFT)
            self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].show_all()
        if IOST_WSetupTestcase_DebugEnable:
            print "3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3-3"
#----------------------------------------------------------------------
def WSetupTestcase_ResetBoard_CB_Callback(self, object, object_active, data=None, number=None):
"Callback function when Enable check button of a testcase have changed"
if number > self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]:
data.append( self.IOST_Data["IP_N"][1] )
self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] +=1
self.IOST_Objs["SetupTestcaseTemplate"]["_"+str(number)+"_RB_Testcase"] =0
Res = object_active.get_active()
if Res:
data[int(number)]["ResetBoard"] = 'Enable'
else:
data[int(number)]["ResetBoard"] = 'Disable'
#----------------------------------------------------------------------
def WSetupTestcase_Init(self, window_name, object_name):
"""
This is the function to set init WSetupTestcase windown when the windown started.
"""
#Delete testcase list that greater than TestcaseListDefault
# print self.IOST_Objs[object_name]["TestcaseList"]
# print self.IOST_Objs[object_name]["TestcaseListDefault"]
if self.IOST_Objs[object_name]["TestcaseList"] > self.IOST_Objs[object_name]["TestcaseListDefault"]:
for testlist in range(self.IOST_Objs[object_name]["TestcaseListDefault"], self.IOST_Objs[object_name]["TestcaseList"]):
self.IOST_Objs[window_name]["_TestcaseList_VBox"].remove( self.IOST_Objs[window_name]["_"+str(testlist+1)+"_HB"])
self.IOST_Objs[window_name]["_TestcaseList_VBox"].remove( self.IOST_Objs[window_name]["_"+str(testlist+1)+"_HS"])
self.IOST_Objs[object_name]["TestcaseList"] = self.IOST_Objs[object_name]["TestcaseListDefault"]
self.WSetupTestcase_Clear(window_name)
#----------------------------------------------------------------------
def WSetupTestcase_Show(self, object_name, data_name, ip_name, testcase):
""
# print "_"+str(testcase)+"_Enable_CB"
object_name["_"+str(testcase)+"_Enable_CB"].set_sensitive(True)
object_name["_"+str(testcase)+"_ResetBoard_CB"].set_sensitive(True)
print data_name[ip_name][testcase]["Active"]
object_name["_"+str(testcase)+"_Enable_CB"].set_active(Str2Bool(data_name[ip_name][testcase]["Active"]))
object_name["_"+str(testcase)+"_Cmd_TE"].set_text(data_name[ip_name][testcase]["Testcase"])
object_name["_"+str(testcase)+"_Description_TE"].set_text(data_name[ip_name][testcase]["Description"])
object_name["_"+str(testcase)+"_RunTimes_TE"].set_text(str(data_name[ip_name][testcase]["RunTimes"]))
object_name["_"+str(testcase)+"_ResetBoard_CB"].set_active(Str2Bool(data_name[ip_name][testcase]["ResetBoard"]))
#----------------------------------------------------------------------
# MAIN FUNCTION OF CLASS
#----------------------------------------------------------------------
    def WSetupTestcase_SetupTestcase(self, window_name, object_name, ip_name):
        """Open and populate the Setup-Testcase window for one board/IP.

        window_name -- key of the window's widget dict in self.IOST_Objs
        object_name -- key of the top-level window widget inside that dict
        ip_name     -- key of the board/IP whose testcases are edited

        Resets the window to its template state, then shows one row per
        stored testcase, growing the row list beyond the template default
        (TestcaseListDefault) when there are more testcases than rows.
        """
        #-----------------------------------------
        # Remembered for this window's OK/Cancel/SelectAll callbacks.
        self.IOST_WSetupTestcase_ip_name = ip_name
        self.IOST_WSetupTestcase_ip_data = self.IOST_Data[ip_name]
        #-----------------------------------------
        # Keep the setup window above the main window while it is open.
        self.IOST_Objs[window_name][object_name].set_keep_above(True)
        #-----------------------------------------
        # Set title of the Setup-Testcase window.
        self.IOST_Objs[window_name][object_name].set_title("Setup Testcase To %s" %ip_name)
        #-----------------------------------------
        # Reset the window back to the template's default row count.
        self.WSetupTestcase_Init(window_name, "SetupTestcaseTemplate")
        #-----------------------------------------
        # Number of testcases currently stored for ip_name.
        self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] = self.IOST_Data[ip_name+"_TestCaseNum"]
        # self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]
        if IOST_WSetupTestcase_DebugEnable :
            print "1. TestcaseList = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
            print "1. TestcaseDefault = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]
            print "1. TestcaseNum = ", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
        #-----------------------------------------
        # All testcases fit into the template's default rows.
        if self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] <= self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]:
            for test in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]):
                self.WSetupTestcase_SetCallback(window_name, self.IOST_WSetupTestcase_ip_data, test+1)
            for testcase in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
                self.WSetupTestcase_Show(self.IOST_Objs[window_name], self.IOST_Data, ip_name, (testcase+1))
        #-----------------------------------------
        else:
            # Testcases 1..TestcaseListDefault use the template's rows.
            for testcase in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]):
                self.WSetupTestcase_SetCallback(window_name, self.IOST_Data[ip_name], testcase+1)
                # print "testcase number=", testcase
                self.WSetupTestcase_Show(self.IOST_Objs[window_name], self.IOST_Data, ip_name, testcase+1)
            # Testcases beyond the default rows need extra rows created.
            for testcase in range(self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
                # print "testcase number=", testcase
                # Create one more testcase slot to display this testcase.
                self.WSetupTestcase_CreateTestListMore(self.IOST_Objs[window_name], self.IOST_Objs["SetupTestcaseTemplate"], (testcase+1))
                self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]+=1
                self.WSetupTestcase_SetCallback(window_name, self.IOST_Data[ip_name], testcase+1)
                # Display the testcase in the newly created row.
                self.WSetupTestcase_Show(self.IOST_Objs[window_name], self.IOST_Data, ip_name, (testcase+1))
            # Append one trailing empty row so a new testcase can be typed in.
            self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"] +=1
            self.WSetupTestcase_CreateTestListMore(self.IOST_Objs[window_name], self.IOST_Objs["SetupTestcaseTemplate"], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
            self.WSetupTestcase_SetCallback(window_name, self.IOST_Data[ip_name], self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"])
        self.IOST_Objs[window_name][object_name].show_all()
#----------------------------------------------------------------------
def WSetupTestcase_CreateTestListMore(self, object_name, object_data, testcase):
""
#______________________________________________________________________
# "_19_HB",
# "_19_Enable_CB",
# "_19_Cmd_TE",
# "_19_Description_TE",
# "_19_RunTimes_TE",
# "_19_ResetBoard_CB",
#______________________________________________________________________
object_name.update({ "_"+str(testcase)+"_HB" : gtk.HBox(False, 0) } )
#______________________________________________________________________
# Radio button to Enable Testcase
# check_button_1 = gtk.CheckButton("%s" %(20+i), False)
# check_button_1.set_size_request(70,25)
# print "Name of Enable Testcase check button %s is %s" %(i, check_button_1.get_name())
# "_Enable_CB" : {
# "Text" : "",
# "SizeRequest_W" : 70,
# "SizeRequest_H" : 25,
# "CanFocus" : "False"
# },
object_name.update( { "_"+str(testcase)+"_Enable_CB" : gtk.CheckButton('%s' %(testcase), False) } )
object_name["_"+str(testcase)+"_Enable_CB"].set_size_request( object_data["_Enable_CB"]["SizeRequest_W"], object_data["_Enable_CB"]["SizeRequest_H"])
object_name["_"+str(testcase)+"_Enable_CB"].set_can_focus ( Str2Bool(object_data["_Enable_CB"]["CanFocus"]) )
#______________________________________________________________________
# Text entry 1: command run testcase
# text_entry1 = gtk.Entry()
# if i == (test_num -1):
# text_entry1.set_text("")
# else:
# text_entry1.set_text("xval add 00%s 000%s" %(i,i))
# text_entry1.set_size_request(-1, 25)
# text_entry1.set_has_frame(False)
# print "Name of CMD Text Entry %s is %s" %(i, check_button_1.get_name())
# "_Cmd_TE" :{
# "Text" : "",
# "SizeRequest_W" : -1,
# "SizeRequest_H" : 25,
# "HasFrame" : "False"
# },
object_name.update( { "_"+str(testcase)+"_Cmd_TE" : gtk.Entry() } )
object_name[ "_"+str(testcase)+"_Cmd_TE"].set_text ( object_data["_Cmd_TE"]["Text"] )
object_name[ "_"+str(testcase)+"_Cmd_TE"].set_size_request( object_data["_Cmd_TE"]["SizeRequest_W"], object_data["_Cmd_TE"]["SizeRequest_H"] )
object_name[ "_"+str(testcase)+"_Cmd_TE"].set_has_frame ( Str2Bool(object_data["_Cmd_TE"]["HasFrame"]) )
#______________________________________________________________________
# Text entry 2: Description of Testcase
# text_entry2 = gtk.Entry()
# if i == (test_num -1):
# text_entry2.set_text("")
# else:
# text_entry2.set_text("Description %s" %i)
# text_entry2.set_size_request(-1, 25)
# text_entry2.set_has_frame(False)
# print "Name of Description Text Entry %s is %s" %(i, check_button_1.get_name())
# "_Description_TE": {
# "Text" : "",
# "SizeRequest_W" : -1,
# "SizeRequest_H" : 25,
# "HasFrame" : "False"
# },
object_name.update( { "_"+str(testcase)+"_Description_TE" : gtk.Entry() } )
object_name["_"+str(testcase)+"_Description_TE"].set_text ( object_data["_Description_TE"]["Text"] )
object_name["_"+str(testcase)+"_Description_TE"].set_size_request( object_data["_Description_TE"]["SizeRequest_W"], object_data["_Description_TE"]["SizeRequest_H"] )
object_name["_"+str(testcase)+"_Description_TE"].set_has_frame ( Str2Bool(object_data["_Description_TE"]["HasFrame"]) )
#______________________________________________________________________
# Text entry 3: Number times to run testcase
# text_entry3 = gtk.Entry()
# if i == (test_num -1):
# text_entry3.set_text("")
# else:
# text_entry3.set_text("10")
# text_entry3.set_size_request(150, 25)
# text_entry3.set_has_frame(False)
# print "Name of RunTimes Text Entry %s is %s" %(i, check_button_1.get_name())
# "_RunTime_TE" :{
# "Text" : "",
# "SizeRequest_W" : 150,
# "SizeRequest_H" : 25,
# "HasFrame" : "False"
# },
object_name.update( { "_"+str(testcase)+"_RunTimes_TE" : gtk.Entry() } )
object_name["_"+str(testcase)+"_RunTimes_TE"].set_text ( str(object_data["_RunTimes_TE"]["Text"] ))
object_name["_"+str(testcase)+"_RunTimes_TE"].set_size_request( object_data["_RunTimes_TE"]["SizeRequest_W"], object_data["_RunTimes_TE"]["SizeRequest_H"] )
object_name["_"+str(testcase)+"_RunTimes_TE"].set_has_frame ( Str2Bool(object_data["_RunTimes_TE"]["HasFrame"]) )
#______________________________________________________________________
# Check button 2: Enable reset board before run testcase
# check_button_2 = gtk.CheckButton("", False)
# check_button_2.set_size_request(100,25)
# "_ResetBoard_CB" : {
# "Text" : "",
# "SizeRequest_W" : 100,
# "SizeRequest_H" : 25,
# "CanFocus" : "False"
# },
object_name.update( { "_"+str(testcase)+"_ResetBoard_CB" : gtk.CheckButton('', False) } )
object_name["_"+str(testcase)+"_ResetBoard_CB"].set_size_request( object_data["_ResetBoard_CB"]["SizeRequest_W"], object_data["_ResetBoard_CB"]["SizeRequest_H"])
object_name["_"+str(testcase)+"_ResetBoard_CB"].set_can_focus ( Str2Bool(object_data["_ResetBoard_CB"]["CanFocus"]) )
#______________________________________________________________________
# "_Hbox" : [
# //Set packing Hbox with 'position-0' to 'Enable' Check button
# {
# "Expand" : "False",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-1' to 'VSeparator' after 'Enable' Check button
# {
# "Expand" : "False",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-2' to 'CMD' Text Entry
# {
# "Expand" : "True",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-3' to 'VSeparator' after 'CMD' Text Entry
# {
# "Expand" : "False",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-4' to 'Description' Text Entry
# {
# "Expand" : "True",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-5' to 'VSeparator' after 'Description' Text Entry
# {
# "Expand" : "False",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-6' to 'RunTime' Text Entry
# {
# "Expand" : "Fale",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-7' to 'VSeparator' after 'Description' Text Entry
# {
# "Expand" : "False",
# "Fill" : "True",
# "Padding" : 0
# },
# //Set packing Hbox with 'position-8' to 'ResetBoard' Check button
# {
# "Expand" : "Fale",
# "Fill" : "True",
# "Padding" : 0
# }
# hbox.pack_start(check_button_1, False, True, 0)
# hbox.pack_start(gtk.VSeparator(), False, True, 0)
# hbox.pack_start(text_entry1, True, True, 0)
# hbox.pack_start(gtk.VSeparator(), False, True, 0)
# hbox.pack_start(text_entry2, True, True, 0)
# hbox.pack_start(gtk.VSeparator(), False, True, 0)
# hbox.pack_start(text_entry3, False, True, 0)
# hbox.pack_start(gtk.VSeparator(), False, True, 0)
# hbox.pack_start(check_button_2, False, True, 0)
# object_name["_"+str(testcase)+"_Enable_CB"]
# object_name[ "_"+str(testcase)+"_Cmd_TE"]
# object_name["_"+str(testcase)+"_Description_TE"]
# object_name["_"+str(testcase)+"_RunTimes_TE"]
# object_name["_"+str(testcase)+"_ResetBoard_CB"]
# 1
object_name["_"+str(testcase)+"_HB"].pack_start(object_name["_"+str(testcase)+"_Enable_CB"], Str2Bool(object_data["_Hbox"][0]["Expand"]), Str2Bool(object_data["_Hbox"][0]["Fill"]), object_data["_Hbox"][0]["Padding"])
# 2
object_name["_"+str(testcase)+"_HB"].pack_start(gtk.VSeparator(), Str2Bool(object_data["_Hbox"][1]["Expand"]), Str2Bool(object_data["_Hbox"][1]["Fill"]), object_data["_Hbox"][1]["Padding"])
# 3
object_name["_"+str(testcase)+"_HB"].pack_start(object_name[ "_"+str(testcase)+"_Cmd_TE"], Str2Bool(object_data["_Hbox"][2]["Expand"]), Str2Bool(object_data["_Hbox"][2]["Fill"]), object_data["_Hbox"][2]["Padding"])
# 4
object_name["_"+str(testcase)+"_HB"].pack_start(gtk.VSeparator(), Str2Bool(object_data["_Hbox"][3]["Expand"]), Str2Bool(object_data["_Hbox"][3]["Fill"]), object_data["_Hbox"][3]["Padding"])
# 5
object_name["_"+str(testcase)+"_HB"].pack_start(object_name["_"+str(testcase)+"_Description_TE"], Str2Bool(object_data["_Hbox"][4]["Expand"]), Str2Bool(object_data["_Hbox"][4]["Fill"]), object_data["_Hbox"][4]["Padding"])
# 6
object_name["_"+str(testcase)+"_HB"].pack_start(gtk.VSeparator(), Str2Bool(object_data["_Hbox"][5]["Expand"]), Str2Bool(object_data["_Hbox"][5]["Fill"]), object_data["_Hbox"][5]["Padding"])
# 7
object_name["_"+str(testcase)+"_HB"].pack_start(object_name["_"+str(testcase)+"_RunTimes_TE"], Str2Bool(object_data["_Hbox"][6]["Expand"]), Str2Bool(object_data["_Hbox"][6]["Fill"]), object_data["_Hbox"][6]["Padding"])
# 8
object_name["_"+str(testcase)+"_HB"].pack_start(gtk.VSeparator(), Str2Bool(object_data["_Hbox"][7]["Expand"]), Str2Bool(object_data["_Hbox"][7]["Fill"]), object_data["_Hbox"][7]["Padding"])
# 9
object_name["_"+str(testcase)+"_HB"].pack_start(object_name["_"+str(testcase)+"_ResetBoard_CB"], Str2Bool(object_data["_Hbox"][8]["Expand"]), Str2Bool(object_data["_Hbox"][8]["Fill"]), object_data["_Hbox"][8]["Padding"])
#______________________________________________________________________
# self.IOST_Objs[window_name]["_TestcaseList_VBox"].add(hbox)
# self.IOST_Objs[window_name]["_TestcaseList_VBox"].add(gtk.HSeparator())
object_name["_TestcaseList_VBox"].pack_start(object_name["_"+str(testcase)+"_HB"], False, True, 0)
object_name.update( { "_"+str(testcase)+"_HS" : gtk.HSeparator() } )
object_name["_TestcaseList_VBox"].pack_start(object_name["_"+str(testcase)+"_HS"], False, True, 0)
# self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]+=1
#----------------------------------------------------------------------
def on_IOST_WSetupTestcase_Skylark_delete_event(self, object, event, data=None):
self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].hide()
return True
# gtk.main_quit()
#----------------------------------------------------------------------
def on_IOST_WSetupTestcase_Skylark_destroy_event(self, object, event, data=None):
# self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].hide()
# gtk.main_quit()
return True
#----------------------------------------------------------------------
def on_IOST_WSetupTestcase_CTRL_Cancel_B_clicked(self, object, data=None):
"""
"""
self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].hide()
    def on_IOST_WSetupTestcase_CTRL_OK_B_clicked(self, object, data=None):
        """OK button: harvest every non-blank row of the window back into
        self.IOST_Data[ip_name], update the stored testcase count, then hide
        the window.

        The stored list keeps a leading "Enable" header element at index 0,
        so the testcase count is len(list) - 1. Rows whose Cmd, Description
        and RunTimes entries are all blank are dropped.
        """
        testcase_result =["Enable"]
        for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
            # if text and text.strip():
            print i
            # Keep the row if any of Cmd / Description / RunTimes is non-blank.
            if ( self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Cmd_TE"].get_text() and \
                self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Cmd_TE"].get_text().strip()) or \
                (self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Description_TE"].get_text() and \
                self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Description_TE"].get_text().strip()) or \
                (self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_RunTimes_TE"].get_text() and \
                self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_RunTimes_TE"].get_text().strip()):
                print "___", i
                # RunTimes defaults to 1 when the field is blank.
                if (self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_RunTimes_TE"].get_text() and \
                    self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_RunTimes_TE"].get_text().strip()):
                    rt = int(self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_RunTimes_TE"].get_text())
                else:
                    rt = 1
                testcase_result.append(
                    {
                    "Testcase" : self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Cmd_TE"].get_text(),
                    "Description" : self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Description_TE"].get_text(),
                    "Active" : Bool2Str( self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Enable_CB"].get_active() ),
                    "RunTimes" : rt,
                    "ResetBoard" : Bool2Str(self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_ResetBoard_CB"].get_active())
                    }
                )
        # Persist the harvested rows and their count (minus the header slot).
        self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"] = len(testcase_result) -1
        self.IOST_Data[self.IOST_WSetupTestcase_ip_name] = testcase_result
        self.IOST_Data[self.IOST_WSetupTestcase_ip_name +"_TestCaseNum"] = len(testcase_result) - 1
        print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
        print "TestcaseList :", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]
        print "TestcaseListDefault :", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseListDefault"]
        print "TestcaseNum :", self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]
        print self.IOST_Data[self.IOST_WSetupTestcase_ip_name]
        print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
        self.IOST_Objs[self.IOST_WSetupTestcase_window][self.IOST_WSetupTestcase_object].hide()
#----------------------------------------------------------------------
def on_IOST_WSetupTestcase_SelectAll_CB_clicked(self, object, data=None):
"""
"""
active = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_SelectAll_CB"].get_active()
for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Enable_CB"].set_active(active)
# if active:
# Text="Do you want Selected all testcase ?"
# Res = MsgConfirm(Text)
# if Res == gtk.RESPONSE_OK:
# for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"]):
# self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Enable_CB"].set_active(active)
# else:
# self.IOST_Objs[self.IOST_WSetupTestcase_window]["_SelectAll_CB"].set_active(not active)
# # pass
# else:
# Text="Do you want Unselected all testcase ?"
# Res = MsgConfirm(Text)
# if Res == gtk.RESPONSE_OK:
# for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseList"] ):
# self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Enable_CB"].set_active(active)
# else:
# self.IOST_Objs[self.IOST_WSetupTestcase_window]["_SelectAll_CB"].set_active(not active)
# # pass
#----------------------------------------------------------------------
def on_IOST_WSetupTestcase_ResetAll_CB_toggled(self, object, data=None):
"""
"""
active = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_ResetAll_CB"].get_active()
if active:
for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_ResetBoard_CB"].set_active(active)
else:
for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_ResetBoard_CB"].set_active(active)
# active = self.IOST_Objs[self.IOST_WSetupTestcase_window]["_SelectAll_CB"].get_active()
# for i in range(0, self.IOST_Objs["SetupTestcaseTemplate"]["TestcaseNum"]):
# self.IOST_Objs[self.IOST_WSetupTestcase_window]["_"+str(i+1)+"_Enable_CB"].set_active(active)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
"""
if False:
test_num = 11
for i in range(0, test_num):
# lable_i = gtk.Label("THis is a lable %s" %i)
print "==================================== %s =============================" %i
hbox = gtk.HBox(False, 0)
# Radio button to Enable Testcase
check_button_1 = gtk.CheckButton("%s" %(20+i), False)
check_button_1.set_size_request(70,25)
print "Name of Enable Testcase check button %s is %s" %(i, check_button_1.get_name())
# Text entry 1: command run testcase
text_entry1 = gtk.Entry()
if i == (test_num -1):
text_entry1.set_text("")
else:
text_entry1.set_text("xval add 00%s 000%s" %(i,i))
text_entry1.set_size_request(-1, 25)
text_entry1.set_has_frame(False)
print "Name of CMD Text Entry %s is %s" %(i, check_button_1.get_name())
# Text entry 2: Description of Testcase
text_entry2 = gtk.Entry()
if i == (test_num -1):
text_entry2.set_text("")
else:
text_entry2.set_text("Description %s" %i)
text_entry2.set_size_request(-1, 25)
text_entry2.set_has_frame(False)
print "Name of Description Text Entry %s is %s" %(i, check_button_1.get_name())
# Text entry 3: Number times to run testcase
text_entry3 = gtk.Entry()
if i == (test_num -1):
text_entry3.set_text("")
else:
text_entry3.set_text("10")
text_entry3.set_size_request(150, 25)
text_entry3.set_has_frame(False)
print "Name of RunTimes Text Entry %s is %s" %(i, check_button_1.get_name())
# Check button 2: Enable reset board before run testcase
check_button_2 = gtk.CheckButton("", False)
check_button_2.set_size_request(100,25)
hbox.pack_start(check_button_1, False, True, 0)
hbox.pack_start(gtk.VSeparator(), False, True, 0)
hbox.pack_start(text_entry1, True, True, 0)
hbox.pack_start(gtk.VSeparator(), False, True, 0)
hbox.pack_start(text_entry2, True, True, 0)
hbox.pack_start(gtk.VSeparator(), False, True, 0)
hbox.pack_start(text_entry3, False, True, 0)
hbox.pack_start(gtk.VSeparator(), False, True, 0)
hbox.pack_start(check_button_2, False, True, 0)
self.IOST_Objs[window_name]["_TestcaseList_VBox"].add(hbox)
self.IOST_Objs[window_name]["_TestcaseList_VBox"].add(gtk.HSeparator())
self.IOST_Objs[window_name][object_name].show_all()
# self.IOST_Objs[window_name][window_name+object_name].hide()
"""
#----------------------------------------------------------------------
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import namedtuple
from functools import partial
import six
from cassandra import InvalidRequest
from cassandra.cluster import UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.query import dict_factory
from cassandra.util import OrderedMap
from tests.integration import use_singledc, execute_until_pass, \
BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, lessthancass30, greaterthanorequalcass36, TestCluster
from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, \
COLLECTION_TYPES, get_sample, get_collection_sample
# Python-side shapes for the nested-collection UDTs created by the tests;
# field names mirror the fields of the corresponding CQL types.
nested_collection_udt = namedtuple('nested_collection_udt', 'm t l s')
nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', 'm t l s u')
def setup_module():
    """Module-level test fixture: bring up the single-DC test cluster, then
    refresh the datatype lists for the connected Cassandra version.

    Order matters: update_datatypes() presumably inspects the running
    cluster started by use_singledc() -- preserved as written.
    """
    use_singledc()
    update_datatypes()
@greaterthancass20
class UDTTests(BasicSegregatedKeyspaceUnitTestCase):
@property
def table_name(self):
return self._testMethodName.lower()
    def setUp(self):
        """Per-test setup: run the base-class setup, then point the shared
        session at this test class's segregated keyspace."""
        super(UDTTests, self).setUp()
        self.session.set_keyspace(self.keyspace_name)
    @greaterthanorequalcass36
    def test_non_frozen_udts(self):
        """
        Test to ensure that non frozen udt's work with C* >3.6.

        @since 3.7.0
        @jira_ticket PYTHON-498
        @expected_result Non frozen UDT's are supported

        @test_category data_types, udt
        """
        self.session.execute("USE {0}".format(self.keyspace_name))
        # Column 'b' is deliberately declared without frozen<>.
        self.session.execute("CREATE TYPE user (state text, has_corn boolean)")
        self.session.execute("CREATE TABLE {0} (a int PRIMARY KEY, b user)".format(self.function_table_name))
        User = namedtuple('user', ('state', 'has_corn'))
        self.cluster.register_user_type(self.keyspace_name, "user", User)
        self.session.execute("INSERT INTO {0} (a, b) VALUES (%s, %s)".format(self.function_table_name), (0, User("Nebraska", True)))
        # Updating a single UDT field is only legal on non-frozen UDT columns.
        self.session.execute("UPDATE {0} SET b.has_corn = False where a = 0".format(self.function_table_name))
        result = self.session.execute("SELECT * FROM {0}".format(self.function_table_name))
        self.assertFalse(result[0].b.has_corn)
        # The reconstructed DDL must not mark the column as frozen.
        table_sql = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].as_cql_query()
        self.assertNotIn("<frozen>", table_sql)
    def test_can_insert_unprepared_registered_udts(self):
        """
        Test the insertion of unprepared, registered UDTs

        Inserts a registered UDT through a simple (unprepared) statement,
        reads it back, then repeats with an identically named UDT in a
        second keyspace to verify registration is per-keyspace.
        """
        c = TestCluster()
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        # Register the Python mapping, then round-trip a value.
        User = namedtuple('user', ('age', 'name'))
        c.register_user_type(self.keyspace_name, "user", User)
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob')))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        # The driver must return the registered class, not a generic type.
        self.assertTrue(type(row.b) is User)
        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_unprepared_registered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_unprepared_registered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        User = namedtuple('user', ('state', 'is_cool'))
        c.register_user_type("udt_test_unprepared_registered2", "user", User)
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User('Texas', True)))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User)
        s.execute("DROP KEYSPACE udt_test_unprepared_registered2")
        c.shutdown()
    def test_can_register_udt_before_connecting(self):
        """
        Test the registration of UDTs before session creation

        Defines two identically named UDTs in two keyspaces, shuts the
        cluster object down, registers both mappings on a fresh,
        not-yet-connected Cluster, then verifies each keyspace resolves
        to its own Python class after connecting.
        """
        c = TestCluster()
        s = c.connect(wait_for_all_pools=True)
        s.execute("""
            CREATE KEYSPACE udt_test_register_before_connecting
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_register_before_connecting")
        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        s.execute("""
            CREATE KEYSPACE udt_test_register_before_connecting2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_register_before_connecting2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        # now that types are defined, shutdown and re-create Cluster
        c.shutdown()
        c = TestCluster()
        User1 = namedtuple('user', ('age', 'name'))
        User2 = namedtuple('user', ('state', 'is_cool'))
        # Registration before connect(): the driver must defer resolution
        # until keyspace metadata becomes available.
        c.register_user_type("udt_test_register_before_connecting", "user", User1)
        c.register_user_type("udt_test_register_before_connecting2", "user", User2)
        s = c.connect(wait_for_all_pools=True)
        s.set_keyspace("udt_test_register_before_connecting")
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob')))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        self.assertTrue(type(row.b) is User1)
        # use the same UDT name in a different keyspace
        s.set_keyspace("udt_test_register_before_connecting2")
        s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True)))
        result = s.execute("SELECT b FROM mytable WHERE a=0")
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User2)
        s.execute("DROP KEYSPACE udt_test_register_before_connecting")
        s.execute("DROP KEYSPACE udt_test_register_before_connecting2")
        c.shutdown()
    def test_can_insert_prepared_unregistered_udts(self):
        """
        Test the insertion of prepared, unregistered UDTs

        Without register_user_type() the driver must still round-trip UDT
        values through prepared statements; the namedtuple here is only
        used to build the bind value.
        """
        c = TestCluster()
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.execute("CREATE TYPE user (age int, name text)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        User = namedtuple('user', ('age', 'name'))
        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User(42, 'bob')))
        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_prepared_unregistered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_prepared_unregistered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        User = namedtuple('user', ('state', 'is_cool'))
        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User('Texas', True)))
        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        s.execute("DROP KEYSPACE udt_test_prepared_unregistered2")
        c.shutdown()
    def test_can_insert_prepared_registered_udts(self):
        """
        Test the insertion of prepared, registered UDTs

        Same flow as the unregistered variant, but the Python class is
        registered first, so reads must return the registered class.
        """
        c = TestCluster()
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.execute("CREATE TYPE user (age int, name text)")
        User = namedtuple('user', ('age', 'name'))
        c.register_user_type(self.keyspace_name, "user", User)
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User(42, 'bob')))
        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual(42, row.b.age)
        self.assertEqual('bob', row.b.name)
        # The registered class, not a generic type, must come back.
        self.assertTrue(type(row.b) is User)
        # use the same UDT name in a different keyspace
        s.execute("""
            CREATE KEYSPACE udt_test_prepared_registered2
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        s.set_keyspace("udt_test_prepared_registered2")
        s.execute("CREATE TYPE user (state text, is_cool boolean)")
        User = namedtuple('user', ('state', 'is_cool'))
        c.register_user_type("udt_test_prepared_registered2", "user", User)
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
        s.execute(insert, (0, User('Texas', True)))
        select = s.prepare("SELECT b FROM mytable WHERE a=?")
        result = s.execute(select, (0,))
        row = result[0]
        self.assertEqual('Texas', row.b.state)
        self.assertEqual(True, row.b.is_cool)
        self.assertTrue(type(row.b) is User)
        s.execute("DROP KEYSPACE udt_test_prepared_registered2")
        c.shutdown()
    def test_can_insert_udts_with_nulls(self):
        """
        Test the insertion of UDTs with null and empty string fields
        """
        c = TestCluster()
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)")
        User = namedtuple('user', ('a', 'b', 'c', 'd'))
        c.register_user_type(self.keyspace_name, "user", User)
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        # An all-null UDT must survive a round trip via both simple and
        # prepared reads.
        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (0, ?)")
        s.execute(insert, [User(None, None, None, None)])
        results = s.execute("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), results[0].b)
        select = s.prepare("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), s.execute(select)[0].b)
        # also test empty strings
        s.execute(insert, [User('', None, None, six.binary_type())])
        results = s.execute("SELECT b FROM mytable WHERE a=0")
        # Empty text / empty blob are distinct from null and must be kept.
        self.assertEqual(('', None, None, six.binary_type()), results[0].b)
        c.shutdown()
def test_can_insert_udts_with_varying_lengths(self):
    """
    Test for ensuring extra-lengthy udts are properly inserted.

    Builds a single UDT with 254 int fields and round-trips instances
    that are populated to a few spot-checked prefixes (0, 1, 2, 3 and
    all 254 fields), the rest padded with None.
    """
    c = TestCluster()
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)
    # 254 is near the practical upper bound for UDT field counts under test
    max_test_length = 254
    # create the seed udt, increase timeout to avoid the query failure on slow systems
    s.execute("CREATE TYPE lengthy_udt ({0})"
              .format(', '.join(['v_{0} int'.format(i)
                                 for i in range(max_test_length)])))
    # create a table with multiple sizes of nested udts
    # no need for all nested types, only a spot checked few and the largest one
    s.execute("CREATE TABLE mytable ("
              "k int PRIMARY KEY, "
              "v frozen<lengthy_udt>)")
    # create and register the seed udt type
    udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(max_test_length)]))
    c.register_user_type(self.keyspace_name, "lengthy_udt", udt)
    # verify inserts and reads
    for i in (0, 1, 2, 3, max_test_length):
        # create udt: first i fields populated 0..i-1, remainder None
        params = [j for j in range(i)] + [None for j in range(max_test_length - i)]
        created_udt = udt(*params)
        # write udt (overwrites k=0 each iteration)
        s.execute("INSERT INTO mytable (k, v) VALUES (0, %s)", (created_udt,))
        # verify udt was written and read correctly, increase timeout to avoid the query failure on slow systems
        result = s.execute("SELECT v FROM mytable WHERE k=0")[0]
        self.assertEqual(created_udt, result.v)
    c.shutdown()
def nested_udt_schema_helper(self, session, max_nesting_depth):
    """Create the chain of nested UDTs depth_0..depth_N and a table using them.

    depth_0 holds (age, name); each depth_{i+1} wraps a frozen depth_{i}.
    The table exposes columns for depths 0-3 plus the deepest level only
    (a spot-check subset, not every depth).
    """
    # create the seed udt
    execute_until_pass(session, "CREATE TYPE depth_0 (age int, name text)")
    # create the nested udts
    for i in range(max_nesting_depth):
        execute_until_pass(session, "CREATE TYPE depth_{0} (value frozen<depth_{1}>)".format(i + 1, i))
    # create a table with multiple sizes of nested udts
    # no need for all nested types, only a spot checked few and the largest one
    # NOTE: assumes max_nesting_depth >= 4 so v_0..v_3 reference existing types
    execute_until_pass(session, "CREATE TABLE mytable ("
                                "k int PRIMARY KEY, "
                                "v_0 frozen<depth_0>, "
                                "v_1 frozen<depth_1>, "
                                "v_2 frozen<depth_2>, "
                                "v_3 frozen<depth_3>, "
                                "v_{0} frozen<depth_{0}>)".format(max_nesting_depth))
def nested_udt_creation_helper(self, udts, i):
    """Build a value of type udts[i] by wrapping a seed udts[0](42, 'Bob')
    in one constructor per nesting level (iterative equivalent of the
    original recursive definition)."""
    value = udts[0](42, 'Bob')
    for depth in range(1, i + 1):
        value = udts[depth](value)
    return value
def nested_udt_verification_helper(self, session, max_nesting_depth, udts):
    """Round-trip nested UDTs at depths 0-3 and max_nesting_depth.

    Each depth is written twice -- once via a simple statement (k=0) and
    once via a prepared statement (k=1) -- and read back for equality.
    Assumes the session's row factory returns dict-like rows (results are
    indexed by column name).
    """
    for i in (0, 1, 2, 3, max_nesting_depth):
        # create udt
        udt = self.nested_udt_creation_helper(udts, i)
        # write udt via simple statement (column name interpolated as a value
        # placeholder here -- relies on the driver rendering `i` inline)
        session.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", [i, udt])
        # verify udt was written and read correctly
        result = session.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0]
        self.assertEqual(udt, result["v_{0}".format(i)])
        # write udt via prepared statement
        insert = session.prepare("INSERT INTO mytable (k, v_{0}) VALUES (1, ?)".format(i))
        session.execute(insert, [udt])
        # verify udt was written and read correctly
        result = session.execute("SELECT v_{0} FROM mytable WHERE k=1".format(i))[0]
        self.assertEqual(udt, result["v_{0}".format(i)])
def _cluster_default_dict_factory(self):
    """Return a TestCluster whose default execution profile yields rows as dicts."""
    dict_profile = ExecutionProfile(row_factory=dict_factory)
    profiles = {EXEC_PROFILE_DEFAULT: dict_profile}
    return TestCluster(execution_profiles=profiles)
def test_can_insert_nested_registered_udts(self):
    """
    Test for ensuring nested registered udts are properly inserted.

    Registers a namedtuple for every nesting level (names matching the
    CQL type names) and verifies round-trips via the shared helpers.
    """
    with self._cluster_default_dict_factory() as c:
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        max_nesting_depth = 16
        # create the schema
        self.nested_udt_schema_helper(s, max_nesting_depth)
        # create and register the seed udt type
        udts = []
        udt = namedtuple('depth_0', ('age', 'name'))
        udts.append(udt)
        c.register_user_type(self.keyspace_name, "depth_0", udts[0])
        # create and register the nested udt types
        for i in range(max_nesting_depth):
            # ('value') is a plain string, which namedtuple accepts as a
            # single space/comma-separated field spec -- one field: "value"
            udt = namedtuple('depth_{0}'.format(i + 1), ('value'))
            udts.append(udt)
            c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1])
        # insert udts and verify inserts with reads
        self.nested_udt_verification_helper(s, max_nesting_depth, udts)
def test_can_insert_nested_unregistered_udts(self):
    """
    Test for ensuring nested unregistered udts are properly inserted.

    Like the registered variant, but never calls register_user_type:
    namedtuples are only used on the write side (prepared statements
    can encode them positionally without registration).
    """
    with self._cluster_default_dict_factory() as c:
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        max_nesting_depth = 16
        # create the schema
        self.nested_udt_schema_helper(s, max_nesting_depth)
        # create the seed udt type
        udts = []
        udt = namedtuple('depth_0', ('age', 'name'))
        udts.append(udt)
        # create the nested udt types
        for i in range(max_nesting_depth):
            udt = namedtuple('depth_{0}'.format(i + 1), ('value'))
            udts.append(udt)
        # insert udts via prepared statements and verify inserts with reads
        for i in (0, 1, 2, 3, max_nesting_depth):
            # create udt
            udt = self.nested_udt_creation_helper(udts, i)
            # write udt
            insert = s.prepare("INSERT INTO mytable (k, v_{0}) VALUES (0, ?)".format(i))
            s.execute(insert, [udt])
            # verify udt was written and read correctly -- unregistered types
            # come back as driver-generated tuples that still compare equal
            result = s.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0]
            self.assertEqual(udt, result["v_{0}".format(i)])
def test_can_insert_nested_registered_udts_with_different_namedtuples(self):
    """
    Test for ensuring nested udts are inserted correctly when the
    created namedtuples use names that differ from the cql type names
    (here 'level_N' namedtuples registered against 'depth_N' CQL types).
    """
    with self._cluster_default_dict_factory() as c:
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)
        max_nesting_depth = 16
        # create the schema
        self.nested_udt_schema_helper(s, max_nesting_depth)
        # create and register the seed udt type
        udts = []
        udt = namedtuple('level_0', ('age', 'name'))
        udts.append(udt)
        c.register_user_type(self.keyspace_name, "depth_0", udts[0])
        # create and register the nested udt types
        for i in range(max_nesting_depth):
            udt = namedtuple('level_{0}'.format(i + 1), ('value'))
            udts.append(udt)
            c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1])
        # insert udts and verify inserts with reads
        self.nested_udt_verification_helper(s, max_nesting_depth, udts)
def test_raise_error_on_nonexisting_udts(self):
    """
    Test for ensuring that an error is raised for operating on a
    nonexisting udt or an invalid keyspace.
    """
    c = TestCluster()
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)
    User = namedtuple('user', ('age', 'name'))
    # unknown keyspace -> registration must fail
    with self.assertRaises(UserTypeDoesNotExist):
        c.register_user_type("some_bad_keyspace", "user", User)
    # valid keyspace but type not defined there -> registration must fail
    with self.assertRaises(UserTypeDoesNotExist):
        c.register_user_type("system", "user", User)
    # referencing an undefined UDT in DDL is a server-side error
    with self.assertRaises(InvalidRequest):
        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
    c.shutdown()
def test_can_insert_udt_all_datatypes(self):
    """
    Test for inserting various types of PRIMITIVE_DATATYPES into UDT's.

    Generates one UDT field per primitive CQL type (fields named 'a',
    'b', ... in iteration order), inserts a sample value for each and
    verifies the round-trip field by field.
    """
    c = TestCluster()
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)
    # create UDT with one column per primitive type: "a <type0>, b <type1>, ..."
    alpha_type_list = []
    start_index = ord('a')
    for i, datatype in enumerate(PRIMITIVE_DATATYPES):
        alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype))
    s.execute("""
        CREATE TYPE alldatatypes ({0})
    """.format(', '.join(alpha_type_list))
              )
    s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)")
    # register UDT: field names must match the generated single letters
    alphabet_list = []
    for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)):
        alphabet_list.append('{0}'.format(chr(i)))
    Alldatatypes = namedtuple("alldatatypes", alphabet_list)
    c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes)
    # insert UDT data: one sample value per type, in the same order
    params = []
    for datatype in PRIMITIVE_DATATYPES:
        params.append((get_sample(datatype)))
    insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
    s.execute(insert, (0, Alldatatypes(*params)))
    # retrieve and verify data
    results = s.execute("SELECT * FROM mytable")
    row = results[0].b
    for expected, actual in zip(params, row):
        self.assertEqual(expected, actual)
    c.shutdown()
def test_can_insert_udt_all_collection_datatypes(self):
    """
    Test for inserting various types of COLLECTION_TYPES into UDT's.

    Builds a UDT with one field per (collection type, primitive key
    type) combination -- fields named 'a_a', 'a_b', ... -- and verifies
    a sample value for each field round-trips intact.
    """
    c = TestCluster()
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)
    # create UDT; maps need two type parameters and tuples must be frozen
    alpha_type_list = []
    start_index = ord('a')
    for i, collection_type in enumerate(COLLECTION_TYPES):
        for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS):
            if collection_type == "map":
                type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j),
                                                             collection_type, datatype)
            elif collection_type == "tuple":
                type_string = "{0}_{1} frozen<{2}<{3}>>".format(chr(start_index + i), chr(start_index + j),
                                                                collection_type, datatype)
            else:
                type_string = "{0}_{1} {2}<{3}>".format(chr(start_index + i), chr(start_index + j),
                                                        collection_type, datatype)
            alpha_type_list.append(type_string)
    s.execute("""
        CREATE TYPE alldatatypes ({0})
    """.format(', '.join(alpha_type_list))
              )
    s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)")
    # register UDT; field names regenerate the same 'x_y' grid as above
    alphabet_list = []
    for i in range(ord('a'), ord('a') + len(COLLECTION_TYPES)):
        for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES_KEYS)):
            alphabet_list.append('{0}_{1}'.format(chr(i), chr(j)))
    Alldatatypes = namedtuple("alldatatypes", alphabet_list)
    c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes)
    # insert UDT data -- sample order mirrors the field generation order
    params = []
    for collection_type in COLLECTION_TYPES:
        for datatype in PRIMITIVE_DATATYPES_KEYS:
            params.append((get_collection_sample(collection_type, datatype)))
    insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
    s.execute(insert, (0, Alldatatypes(*params)))
    # retrieve and verify data
    results = s.execute("SELECT * FROM mytable")
    row = results[0].b
    for expected, actual in zip(params, row):
        self.assertEqual(expected, actual)
    c.shutdown()
def insert_select_column(self, session, table_name, column_name, value):
    """Round-trip helper: insert `value` into `column_name` at k=0 via a
    prepared statement, read it back, and assert equality.

    Table/column names are spliced with %-interpolation (they cannot be
    bound parameters); the `%%s` survives the first interpolation to
    become the driver-side %s placeholder for the key.
    """
    insert = session.prepare("INSERT INTO %s (k, %s) VALUES (?, ?)" % (table_name, column_name))
    session.execute(insert, (0, value))
    result = session.execute("SELECT %s FROM %s WHERE k=%%s" % (column_name, table_name), (0,))[0][0]
    self.assertEqual(result, value)
def test_can_insert_nested_collections(self):
    """
    Test for inserting various types of nested COLLECTION_TYPES into
    tables and UDTs.

    Exercises maps whose keys AND values are frozen collections
    (map/set/list/tuple/UDT), including a UDT nested inside another UDT.
    """
    if self.cass_version < (2, 1, 3):
        raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3")
    c = TestCluster()
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)
    # encode Python tuples as CQL tuples for simple-statement inserts
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple
    name = self._testMethodName
    s.execute("""
        CREATE TYPE %s (
            m frozen<map<int,text>>,
            t tuple<int,text>,
            l frozen<list<int>>,
            s frozen<set<int>>
        )""" % name)
    s.execute("""
        CREATE TYPE %s_nested (
            m frozen<map<int,text>>,
            t tuple<int,text>,
            l frozen<list<int>>,
            s frozen<set<int>>,
            u frozen<%s>
        )""" % (name, name))
    s.execute("""
        CREATE TABLE %s (
            k int PRIMARY KEY,
            map_map map<frozen<map<int,int>>, frozen<map<int,int>>>,
            map_set map<frozen<set<int>>, frozen<set<int>>>,
            map_list map<frozen<list<int>>, frozen<list<int>>>,
            map_tuple map<frozen<tuple<int, int>>, frozen<tuple<int>>>,
            map_udt map<frozen<%s_nested>, frozen<%s>>,
        )""" % (name, name, name))
    # bind session and table so each call only names the column and value
    validate = partial(self.insert_select_column, s, name)
    validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})]))
    validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))]))
    validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])]))
    validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))]))
    value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10)))
    key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value)
    key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value)
    validate('map_udt', OrderedMap([(key, value), (key2, value)]))
    c.shutdown()
def test_non_alphanum_identifiers(self):
    """
    PYTHON-413

    UDTs with non-alphanumeric (quoted) field names cannot be mapped to
    namedtuple attributes, so the driver must fall back to plain tuples
    for such types while still producing namedtuples (with positional
    field names) for types whose *type name* is alphanumeric.
    """
    s = self.session
    non_alphanum_name = 'test.field@#$%@%#!'
    type_name = 'type2'
    # type whose name AND field are non-alphanumeric (both quoted)
    s.execute('CREATE TYPE "%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name))
    # alphanumeric type name, but still a non-alphanumeric field
    s.execute('CREATE TYPE %s ("%s" text)' % (type_name, non_alphanum_name))
    # table with types as map keys to make sure the tuple lookup works
    s.execute('CREATE TABLE %s (k int PRIMARY KEY, non_alphanum_type_map map<frozen<"%s">, int>, alphanum_type_map map<frozen<%s>, int>)' % (self.table_name, non_alphanum_name, type_name))
    s.execute('INSERT INTO %s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name))
    row = s.execute('SELECT * FROM %s' % (self.table_name,))[0]
    k, v = row.non_alphanum_type_map.popitem()
    self.assertEqual(v, 0)
    # non-alphanumeric type name -> plain tuple fallback
    self.assertEqual(k.__class__, tuple)
    self.assertEqual(k[0], 'nonalphanum')
    k, v = row.alphanum_type_map.popitem()
    self.assertEqual(v, 1)
    self.assertNotEqual(k.__class__, tuple)  # should be the namedtuple type
    self.assertEqual(k[0], 'alphanum')
    self.assertEqual(k.field_0_, 'alphanum')  # named tuple with positional field name
@lessthancass30
def test_type_alteration(self):
    """
    Support for ALTER TYPE was removed in CASSANDRA-12443, so this only
    runs against Cassandra < 3.0.

    Verifies that adding a field and altering a field's type are both
    reflected in subsequently read rows when the type is registered as a
    plain dict.
    """
    s = self.session
    type_name = "type_name"
    # type must not pre-exist in the schema metadata
    self.assertNotIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types)
    s.execute('CREATE TYPE %s (v0 int)' % (type_name,))
    self.assertIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types)
    s.execute('CREATE TABLE %s (k int PRIMARY KEY, v frozen<%s>)' % (self.table_name, type_name))
    s.execute('INSERT INTO %s (k, v) VALUES (0, {v0 : 1})' % (self.table_name,))
    # dict mapping tolerates schema evolution better than namedtuples
    s.cluster.register_user_type('udttests', type_name, dict)
    val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
    self.assertEqual(val['v0'], 1)
    # add field: pre-existing rows surface the new field as None
    s.execute('ALTER TYPE %s ADD v1 text' % (type_name,))
    val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
    self.assertEqual(val['v0'], 1)
    self.assertIsNone(val['v1'])
    s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 2, v1 : 'sometext'})" % (self.table_name,))
    val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
    self.assertEqual(val['v0'], 2)
    self.assertEqual(val['v1'], 'sometext')
    # alter field type: text -> blob, new values come back as bytes
    s.execute('ALTER TYPE %s ALTER v1 TYPE blob' % (type_name,))
    s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 3, v1 : 0xdeadbeef})" % (self.table_name,))
    val = s.execute('SELECT v FROM %s' % self.table_name)[0][0]
    self.assertEqual(val['v0'], 3)
    self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef'))
@lessthancass30
def test_alter_udt(self):
    """
    Test to ensure that altered UDT's are properly surfaced without needing to restart the underlying session.

    @since 3.0.0
    @jira_ticket PYTHON-226
    @expected_result UDT's will reflect added columns without a session restart.

    @test_category data_types, udt
    """
    # Create udt ensure it has the proper column names.
    self.session.set_keyspace(self.keyspace_name)
    self.session.execute("CREATE TYPE typetoalter (a int)")
    # ('a') is a plain string field spec -> namedtuple with single field 'a'
    typetoalter = namedtuple('typetoalter', ('a'))
    self.session.execute("CREATE TABLE {0} (pk int primary key, typetoalter frozen<typetoalter>)".format(self.function_table_name))
    insert_statement = self.session.prepare("INSERT INTO {0} (pk, typetoalter) VALUES (?, ?)".format(self.function_table_name))
    self.session.execute(insert_statement, [1, typetoalter(1)])
    results = self.session.execute("SELECT * from {0}".format(self.function_table_name))
    for result in results:
        self.assertTrue(hasattr(result.typetoalter, 'a'))
        self.assertFalse(hasattr(result.typetoalter, 'b'))
    # Alter UDT and ensure the alter is honored in results
    self.session.execute("ALTER TYPE typetoalter add b int")
    # re-create the namedtuple with the new field; the still-cached prepared
    # statement must accept the widened type without re-preparing
    typetoalter = namedtuple('typetoalter', ('a', 'b'))
    self.session.execute(insert_statement, [2, typetoalter(2, 2)])
    results = self.session.execute("SELECT * from {0}".format(self.function_table_name))
    for result in results:
        self.assertTrue(hasattr(result.typetoalter, 'a'))
        self.assertTrue(hasattr(result.typetoalter, 'b'))
|
|
import unittest
from mongoengine import *
from mongoengine.connection import get_db
__all__ = ("DynamicTest", )
class DynamicTest(unittest.TestCase):
    """Tests for MongoEngine DynamicDocument / DynamicEmbeddedDocument:
    ad-hoc (undeclared) fields, their persistence, deletion, querying,
    inheritance, and interaction with embedded documents.

    NOTE(review): several assertions compare ``dict.keys()`` directly to a
    list (e.g. in test_simple_dynamic_document); that only holds on
    Python 2 where ``keys()`` returns a list -- confirm target Python
    version before porting.
    """

    def setUp(self):
        # Fresh connection + a dynamic Person class with one declared field;
        # anything else assigned to an instance becomes a dynamic field.
        connect(db='mongoenginetest')
        self.db = get_db()

        class Person(DynamicDocument):
            name = StringField()
            meta = {'allow_inheritance': True}

        Person.drop_collection()
        self.Person = Person

    def test_simple_dynamic_document(self):
        """Ensures simple dynamic documents are saved correctly"""
        p = self.Person()
        p.name = "James"
        p.age = 34  # undeclared -> dynamic field
        self.assertEqual(p.to_mongo(), {"_cls": "Person", "name": "James",
                                        "age": 34})
        self.assertEqual(p.to_mongo().keys(), ["_cls", "name", "age"])
        p.save()
        # _id appears first after save
        self.assertEqual(p.to_mongo().keys(), ["_id", "_cls", "name", "age"])
        self.assertEqual(self.Person.objects.first().age, 34)
        # Confirm no changes to self.Person
        self.assertFalse(hasattr(self.Person, 'age'))

    def test_change_scope_of_variable(self):
        """Test changing the scope of a dynamic field has no adverse effects"""
        p = self.Person()
        p.name = "Dean"
        p.misc = 22
        p.save()
        # re-fetch and change the dynamic field's type entirely (int -> dict)
        p = self.Person.objects.get()
        p.misc = {'hello': 'world'}
        p.save()
        p = self.Person.objects.get()
        self.assertEqual(p.misc, {'hello': 'world'})

    def test_delete_dynamic_field(self):
        """Test deleting a dynamic field works"""
        self.Person.drop_collection()
        p = self.Person()
        p.name = "Dean"
        p.misc = 22
        p.save()
        p = self.Person.objects.get()
        p.misc = {'hello': 'world'}
        p.save()
        p = self.Person.objects.get()
        self.assertEqual(p.misc, {'hello': 'world'})
        # inspect the raw document to confirm the stored key set
        collection = self.db[self.Person._get_collection_name()]
        obj = collection.find_one()
        self.assertEqual(sorted(obj.keys()), ['_cls', '_id', 'misc', 'name'])
        # deleting the attribute must unset the key on save
        del p.misc
        p.save()
        p = self.Person.objects.get()
        self.assertFalse(hasattr(p, 'misc'))
        obj = collection.find_one()
        self.assertEqual(sorted(obj.keys()), ['_cls', '_id', 'name'])

    def test_reload_after_unsetting(self):
        # reload() must not fail after a server-side $unset of a dynamic field
        p = self.Person()
        p.misc = 22
        p.save()
        p.update(unset__misc=1)
        p.reload()

    def test_reload_dynamic_field(self):
        self.Person.objects.delete()
        p = self.Person.objects.create()
        # update() goes straight to the server; local _data is stale until reload
        p.update(age=1)
        self.assertEqual(len(p._data), 3)
        self.assertEqual(sorted(p._data.keys()), ['_cls', 'id', 'name'])
        p.reload()
        self.assertEqual(len(p._data), 4)
        self.assertEqual(sorted(p._data.keys()), ['_cls', 'age', 'id', 'name'])

    def test_dynamic_document_queries(self):
        """Ensure we can query dynamic fields"""
        p = self.Person()
        p.name = "Dean"
        p.age = 22
        p.save()
        self.assertEqual(1, self.Person.objects(age=22).count())
        p = self.Person.objects(age=22)
        p = p.get()
        self.assertEqual(22, p.age)

    def test_complex_dynamic_document_queries(self):
        class Person(DynamicDocument):
            name = StringField()

        Person.drop_collection()
        # same dynamic field holding mixed types across documents
        p = Person(name="test")
        p.age = "ten"
        p.save()
        p1 = Person(name="test1")
        p1.age = "less then ten and a half"
        p1.save()
        p2 = Person(name="test2")
        p2.age = 10
        p2.save()
        # string operators match only the string-valued docs ...
        self.assertEqual(Person.objects(age__icontains='ten').count(), 2)
        # ... numeric comparison only the int-valued one
        self.assertEqual(Person.objects(age__gte=10).count(), 1)

    def test_complex_data_lookups(self):
        """Ensure you can query dynamic document dynamic fields"""
        p = self.Person()
        p.misc = {'hello': 'world'}
        p.save()
        self.assertEqual(1, self.Person.objects(misc__hello='world').count())

    def test_three_level_complex_data_lookups(self):
        """Ensure you can query three level document dynamic fields"""
        p = self.Person.objects.create(
            misc={'hello': {'hello2': 'world'}}
        )
        self.assertEqual(1, self.Person.objects(misc__hello__hello2='world').count())

    def test_complex_embedded_document_validation(self):
        """Ensure embedded dynamic documents may be validated"""
        class Embedded(DynamicEmbeddedDocument):
            content = URLField()

        class Doc(DynamicDocument):
            pass

        Doc.drop_collection()
        doc = Doc()
        embedded_doc_1 = Embedded(content='http://mongoengine.org')
        embedded_doc_1.validate()
        embedded_doc_2 = Embedded(content='this is not a url')
        self.assertRaises(ValidationError, embedded_doc_2.validate)
        # validation of the parent must cascade into embedded docs
        doc.embedded_field_1 = embedded_doc_1
        doc.embedded_field_2 = embedded_doc_2
        self.assertRaises(ValidationError, doc.validate)

    def test_inheritance(self):
        """Ensure that dynamic document plays nice with inheritance"""
        class Employee(self.Person):
            salary = IntField()

        Employee.drop_collection()
        # subclass shares the parent's collection and inherits fields
        self.assertTrue('name' in Employee._fields)
        self.assertTrue('salary' in Employee._fields)
        self.assertEqual(Employee._get_collection_name(),
                         self.Person._get_collection_name())
        joe_bloggs = Employee()
        joe_bloggs.name = "Joe Bloggs"
        joe_bloggs.salary = 10
        joe_bloggs.age = 20  # dynamic field on the subclass
        joe_bloggs.save()
        # dynamic-field queries work from both the base and subclass
        self.assertEqual(1, self.Person.objects(age=20).count())
        self.assertEqual(1, Employee.objects(age=20).count())
        # _cls makes the base query rehydrate the subclass
        joe_bloggs = self.Person.objects.first()
        self.assertTrue(isinstance(joe_bloggs, Employee))

    def test_embedded_dynamic_document(self):
        """Test dynamic embedded documents"""
        class Embedded(DynamicEmbeddedDocument):
            pass

        class Doc(DynamicDocument):
            pass

        Doc.drop_collection()
        doc = Doc()
        embedded_1 = Embedded()
        embedded_1.string_field = 'hello'
        embedded_1.int_field = 1
        embedded_1.dict_field = {'hello': 'world'}
        embedded_1.list_field = ['1', 2, {'hello': 'world'}]
        doc.embedded_field = embedded_1
        self.assertEqual(doc.to_mongo(), {
            "embedded_field": {
                "_cls": "Embedded",
                "string_field": "hello",
                "int_field": 1,
                "dict_field": {"hello": "world"},
                "list_field": ['1', 2, {'hello': 'world'}]
            }
        })
        doc.save()
        doc = Doc.objects.first()
        self.assertEqual(doc.embedded_field.__class__, Embedded)
        self.assertEqual(doc.embedded_field.string_field, "hello")
        self.assertEqual(doc.embedded_field.int_field, 1)
        self.assertEqual(doc.embedded_field.dict_field, {'hello': 'world'})
        self.assertEqual(doc.embedded_field.list_field,
                         ['1', 2, {'hello': 'world'}])

    def test_complex_embedded_documents(self):
        """Test complex dynamic embedded documents setups"""
        class Embedded(DynamicEmbeddedDocument):
            pass

        class Doc(DynamicDocument):
            pass

        Doc.drop_collection()
        doc = Doc()
        embedded_1 = Embedded()
        embedded_1.string_field = 'hello'
        embedded_1.int_field = 1
        embedded_1.dict_field = {'hello': 'world'}
        # second embedded doc nested inside the first one's list field
        embedded_2 = Embedded()
        embedded_2.string_field = 'hello'
        embedded_2.int_field = 1
        embedded_2.dict_field = {'hello': 'world'}
        embedded_2.list_field = ['1', 2, {'hello': 'world'}]
        embedded_1.list_field = ['1', 2, embedded_2]
        doc.embedded_field = embedded_1
        self.assertEqual(doc.to_mongo(), {
            "embedded_field": {
                "_cls": "Embedded",
                "string_field": "hello",
                "int_field": 1,
                "dict_field": {"hello": "world"},
                "list_field": ['1', 2,
                               {"_cls": "Embedded",
                                "string_field": "hello",
                                "int_field": 1,
                                "dict_field": {"hello": "world"},
                                "list_field": ['1', 2, {'hello': 'world'}]}
                               ]
            }
        })
        doc.save()
        doc = Doc.objects.first()
        self.assertEqual(doc.embedded_field.__class__, Embedded)
        self.assertEqual(doc.embedded_field.string_field, "hello")
        self.assertEqual(doc.embedded_field.int_field, 1)
        self.assertEqual(doc.embedded_field.dict_field, {'hello': 'world'})
        self.assertEqual(doc.embedded_field.list_field[0], '1')
        self.assertEqual(doc.embedded_field.list_field[1], 2)
        # nested embedded doc must rehydrate as Embedded, not a plain dict
        embedded_field = doc.embedded_field.list_field[2]
        self.assertEqual(embedded_field.__class__, Embedded)
        self.assertEqual(embedded_field.string_field, "hello")
        self.assertEqual(embedded_field.int_field, 1)
        self.assertEqual(embedded_field.dict_field, {'hello': 'world'})
        self.assertEqual(embedded_field.list_field, ['1', 2,
                                                     {'hello': 'world'}])

    def test_dynamic_and_embedded(self):
        """Ensure embedded documents play nicely"""
        class Address(EmbeddedDocument):
            city = StringField()

        class Person(DynamicDocument):
            name = StringField()

        Person.drop_collection()
        # `address` itself is a dynamic field holding a declared EmbeddedDocument
        Person(name="Ross", address=Address(city="London")).save()
        person = Person.objects.first()
        person.address.city = "Lundenne"
        person.save()
        self.assertEqual(Person.objects.first().address.city, "Lundenne")
        person = Person.objects.first()
        person.address = Address(city="Londinium")
        person.save()
        self.assertEqual(Person.objects.first().address.city, "Londinium")
        person = Person.objects.first()
        person.age = 35
        person.save()
        self.assertEqual(Person.objects.first().age, 35)

    def test_dynamic_embedded_works_with_only(self):
        """Ensure custom fieldnames on a dynamic embedded document are found by qs.only()"""
        class Address(DynamicEmbeddedDocument):
            city = StringField()

        class Person(DynamicDocument):
            address = EmbeddedDocumentField(Address)

        Person.drop_collection()
        Person(name="Eric", address=Address(city="San Francisco", street_number="1337")).save()
        self.assertEqual(Person.objects.first().address.street_number, '1337')
        # projection through a dynamic sub-field must still surface it
        self.assertEqual(Person.objects.only('address__street_number').first().address.street_number, '1337')

    def test_dynamic_and_embedded_dict_access(self):
        """Ensure embedded dynamic documents work with dict[] style access"""
        class Address(EmbeddedDocument):
            city = StringField()

        class Person(DynamicDocument):
            name = StringField()

        Person.drop_collection()
        Person(name="Ross", address=Address(city="London")).save()
        person = Person.objects.first()
        person.attrval = "This works"
        person["phone"] = "555-1212"  # but this should too
        # Same thing two levels deep
        person["address"]["city"] = "Lundenne"
        person.save()
        self.assertEqual(Person.objects.first().address.city, "Lundenne")
        self.assertEqual(Person.objects.first().phone, "555-1212")
        person = Person.objects.first()
        person.address = Address(city="Londinium")
        person.save()
        self.assertEqual(Person.objects.first().address.city, "Londinium")
        person = Person.objects.first()
        person["age"] = 35
        person.save()
        self.assertEqual(Person.objects.first().age, 35)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
|
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array
        Pairwise distances between samples.
        NOTE(review): documented upstream as a condensed 1-D matrix, but
        the ``conditional_P.T`` transpose and the ``squareform(P)`` call
        below only make sense for a full square (n_samples, n_samples)
        matrix -- confirm against the caller.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    distances = astype(distances, np.float32, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)
    # Symmetrize: p_ij proportional to (p_j|i + p_i|j); the normalization by
    # sum_P below absorbs the usual 1/(2N) factor.
    P = conditional_P + conditional_P.T
    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
    # Clip at MACHINE_EPSILON so downstream log(P) / division never hit zero.
    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
    return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    considers all pairs (quadratic in n_samples), while limiting the joint
    probability to nearest neighbors improves this substantially to O(uN),
    with u the number of neighbors.

    Parameters
    ----------
    distances : array
        Pairwise distances between samples (see the review note on
        `_joint_probabilities` regarding condensed vs. square layout).

    neighbors : int array, shape (n_samples, K)
        Indices of the K nearest neighbors of each sample; restricts the
        perplexity binary search to these pairs.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    distances = astype(distances, np.float32, copy=False)
    # the Cython helper requires int64 neighbor indices
    neighbors = astype(neighbors, np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    m = "All probabilities should be finite"
    assert np.all(np.isfinite(conditional_P)), m
    # Symmetrize and normalize exactly as in _joint_probabilities
    P = conditional_P + conditional_P.T
    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
    assert np.all(np.abs(P) <= 1.0)
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective function: gradient of the KL divergence
    of p_ijs and q_ijs and the absolute error.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t-distribution
    # n starts as condensed squared distances and is transformed IN PLACE
    # into the unnormalized Student-t kernel (1 + d^2/dof)^(-(dof+1)/2);
    # the statement order below is therefore significant.
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= degrees_of_freedom
    n **= (degrees_of_freedom + 1.0) / -2.0
    # 2.0 * sum(n) counts each (i, j) pair twice, matching the condensed layout
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)

    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS

    # Objective: C (Kullback-Leibler divergence of P and Q)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))

    # Gradient: dC/dY, one row per embedded point
    grad = np.ndarray((n_samples, n_components))
    PQd = squareform((P - Q) * n)
    for i in range(skip_num_points, n_samples):
        np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
    grad = grad.ravel()
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad *= c

    return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
                         n_components):
    """t-SNE objective function: the absolute error of the
    KL divergence of p_ijs and q_ijs.

    Unlike `_kl_divergence`, this computes the objective only -- no
    gradient is returned.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix (a square matrix is also
        accepted and condensed internally).

    neighbors : array (n_samples, K)
        The neighbors is not actually required to calculate the
        divergence, but is here to match the signature of the
        gradient function

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t-distribution
    # (same in-place kernel construction as in _kl_divergence)
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= degrees_of_freedom
    n **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)

    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS

    # Objective: C (Kullback-Leibler divergence of P and Q)
    # accept a square P by condensing it to match Q's layout
    if len(P.shape) == 2:
        P = squareform(P)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))

    return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
                      n_components, angle=0.5, skip_num_points=0,
                      verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.
    Uses Barnes-Hut tree methods to calculate the gradient that
    runs in O(NlogN) instead of O(N^2)
    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    neighbors: int64 array, shape (n_samples, K)
        Array with element [i, j] giving the index for the jth
        closest neighbor to point i.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    verbose : int
        Verbosity level.
    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    # The compiled Barnes-Hut kernel works on float32 embeddings and int64
    # neighbor indices; copy=False avoids copies when dtypes already match.
    params = astype(params, np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)
    neighbors = astype(neighbors, np.int64, copy=False)
    # Accept either a condensed (1-D) or square (2-D) joint-probability
    # matrix; the tree code is driven with the square float32 form.
    if len(P.shape) == 1:
        sP = squareform(P).astype(np.float32)
    else:
        sP = P.astype(np.float32)
    # `grad` is filled in-place by the compiled gradient routine, which
    # returns the (approximate) KL divergence as its value.
    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    # Scale the gradient by the Student-t constant 2 * (dof + 1) / dof.
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= c
    return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    """Expresses to what extent the local structure is retained.
    The trustworthiness is within [0, 1]. It is defined as
    .. math::
        T(k) = 1 - \\frac{2}{nk (2n - 3k - 1)} \\sum^n_{i=1}
            \\sum_{j \\in U^{(k)}_i (r(i, j) - k)}
    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.
    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study" J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten
    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.
    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    dist_X = X if precomputed else pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Neighbor orderings in the original and the embedded space; column 0
    # of the embedded argsort is each point itself and is dropped.
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    penalty = 0.0
    for i in range(n_samples):
        # Rank (in the original space) of each embedded-space neighbor.
        ranks = np.array(
            [np.where(ind_X[i] == neighbor)[0][0]
             for neighbor in ind_X_embedded[i]], dtype=float)
        ranks -= n_neighbors
        # Only neighbors that intruded from beyond the original k count.
        penalty += ranks[ranks > 0].sum()
    normalizer = 2.0 / (n_samples * n_neighbors *
                        (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    return 1.0 - penalty * normalizer
class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 4.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 1000)
        The learning rate can be a critical parameter. It should be
        between 100 and 1000. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high. If the cost function gets stuck in a bad local
        minimum increasing the learning rate helps sometimes.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 200.
    n_iter_without_progress : int, optional (default: 30)
        Maximum number of iterations without progress before we abort the
        optimization.
        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, optional (default: 1E-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca'
        or a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton. Note that different initializations
        might result in different local minima of the cost function.
    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.
        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = TSNE(n_components=2, random_state=0)
    >>> np.set_printoptions(suppress=True)
    >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    array([[ 0.00017599, 0.00003993],
           [ 0.00009891, 0.00021913],
           [ 0.00018554, -0.00009357],
           [ 0.00009528, -0.00001407]])
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
                 n_iter_without_progress=30, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # BUGFIX: the previous condition
        #     init not in [...] or isinstance(init, np.ndarray)
        # rejected the documented ndarray initialization outright, and
        # evaluating `ndarray in list` triggers element-wise comparison.
        # Check the ndarray case first so `in` is never applied to arrays.
        if not (isinstance(init, np.ndarray) or init in ["pca", "random"]):
            msg = "'init' must be 'pca', 'random' or a NumPy array"
            raise ValueError(msg)
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
        self.embedding_ = None

    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.
        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
            when method='barnes_hut', X cannot be a sparse array and if need
            be will be converted to a 32 bit float array. Method='exact'
            allows sparse arrays and 64bit floating point inputs.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.method == 'barnes_hut':
            # BUGFIX: the float32 conversion used to sit unreachably after
            # the raise statement; Barnes-Hut needs dense float32 input.
            if sp.issparse(X):
                raise TypeError('A sparse matrix was passed, but dense '
                                'data is required for method="barnes_hut". Use '
                                'X.toarray() to convert to a dense numpy array if '
                                'the array is small enough for it to fit in '
                                'memory. Otherwise consider dimensionality '
                                'reduction techniques (e.g. TruncatedSVD)')
            X = check_array(X, dtype=np.float32)
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=np.float64)
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)
        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")
        if self.metric == "precomputed":
            if self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            if self.metric == "euclidean":
                # t-SNE's P is defined on squared euclidean distances.
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
            if not np.all(distances >= 0):
                raise ValueError("All distances should be positive, either "
                                 "the metric or precomputed distances given "
                                 "as X are not correct")
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)
        n_samples = X.shape[0]
        # the number of nearest neighbors to find
        k = min(n_samples - 1, int(3. * self.perplexity + 1))
        neighbors_nn = None
        if self.method == 'barnes_hut':
            if self.verbose:
                print("[t-SNE] Computing %i nearest neighbors..." % k)
            if self.metric == 'precomputed':
                # Use the precomputed distances to find
                # the k nearest neighbors and their distances.
                # NOTE(review): this keeps each point's self-column
                # (distance 0) as its first neighbor, unlike the BallTree
                # path below which drops it -- confirm this is intended.
                neighbors_nn = np.argsort(distances, axis=1)[:, :k]
            else:
                # Find the nearest neighbors for every point
                bt = BallTree(X)
                # LvdM uses 3 * perplexity as the number of neighbors
                # And we add one to not count the data point itself
                # In the event that we have very small # of points
                # set the neighbors to n - 1
                distances_nn, neighbors_nn = bt.query(X, k=k + 1)
                neighbors_nn = neighbors_nn[:, 1:]
            P = _joint_probabilities_nn(distances, neighbors_nn,
                                        self.perplexity, self.verbose)
        else:
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), "All probabilities should be finite"
        assert np.all(P >= 0), "All probabilities should be zero or positive"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "than or equal to one")
        if self.init == 'pca':
            pca = RandomizedPCA(n_components=self.n_components,
                                random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'random':
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s"
                             % self.init)
        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)

    def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
              X_embedded=None, neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with three stages:
        # * early exaggeration with momentum 0.5
        # * early exaggeration with momentum 0.8
        # * final optimization with momentum 0.8
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.
        if X_embedded is None:
            # Initialize embedding randomly
            X_embedded = 1e-4 * random_state.randn(n_samples,
                                                   self.n_components)
        params = X_embedded.ravel()
        opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
                    "learning_rate": self.learning_rate,
                    "verbose": self.verbose, "n_iter_check": 25,
                    "kwargs": dict(skip_num_points=skip_num_points)}
        if self.method == 'barnes_hut':
            m = "Must provide an array of neighbors to use Barnes-Hut"
            assert neighbors is not None, m
            obj_func = _kl_divergence_bh
            objective_error = _kl_divergence_error
            sP = squareform(P).astype(np.float32)
            neighbors = neighbors.astype(np.int64)
            args = [sP, neighbors, degrees_of_freedom, n_samples,
                    self.n_components]
            opt_args['args'] = args
            opt_args['min_grad_norm'] = 1e-3
            opt_args['n_iter_without_progress'] = 30
            # Don't always calculate the cost since that calculation
            # can be nearly as expensive as the gradient
            opt_args['objective_error'] = objective_error
            opt_args['kwargs']['angle'] = self.angle
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence
            opt_args['args'] = [P, degrees_of_freedom, n_samples,
                                self.n_components]
            opt_args['min_error_diff'] = 0.0
            opt_args['min_grad_norm'] = 0.0
        # Early exaggeration
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        opt_args['n_iter'] = 100
        opt_args['momentum'] = 0.8
        opt_args['it'] = it + 1
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))
        # Save the final number of iterations
        self.n_iter_final = it
        # Final optimization
        P /= self.early_exaggeration
        opt_args['n_iter'] = self.n_iter
        opt_args['it'] = it + 1
        params, error, it = _gradient_descent(obj_func, params, **opt_args)
        if self.verbose:
            # BUGFIX: report the final-stage error, not the stale
            # early-exaggeration KL divergence.
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, error))
        X_embedded = params.reshape(n_samples, self.n_components)
        # BUGFIX: expose the post-optimization divergence, as documented
        # for the `kl_divergence_` attribute.
        self.kl_divergence_ = error
        return X_embedded

    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        y : ignored
            Present for scikit-learn estimator API compatibility.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_

    def fit(self, X, y=None):
        """Fit X into an embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        y : ignored
            Present for scikit-learn estimator API compatibility.
        """
        self.fit_transform(X)
        return self
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Alias used throughout the tests below for the distributions API
# re-exported from tensorflow.contrib.
distributions_py = distributions
def _swap_first_last_axes(array):
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
  """Use monkey-patching to capture the output of an MVNDiag _sample_n.

  Yields:
    A list that accumulates every value returned by
    `MultivariateNormalDiag._sample_n` while the context is active.
  """
  data_container = []
  true_mvndiag_sample_n = distributions_py.MultivariateNormalDiag._sample_n

  def _capturing_mvndiag_sample_n(self, n, seed=None):
    samples = true_mvndiag_sample_n(self, n=n, seed=seed)
    data_container.append(samples)
    return samples

  distributions_py.MultivariateNormalDiag._sample_n = (
      _capturing_mvndiag_sample_n)
  try:
    yield data_container
  finally:
    # BUGFIX: restore the real implementation even if the with-body
    # raises, so one failing test cannot leak the patch into later tests.
    distributions_py.MultivariateNormalDiag._sample_n = (
        true_mvndiag_sample_n)
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
  """Use monkey-patching to capture the output of a Normal _sample_n.

  Yields:
    A list that accumulates every value returned by `Normal._sample_n`
    while the context is active.
  """
  data_container = []
  true_normal_sample_n = distributions_py.Normal._sample_n

  def _capturing_normal_sample_n(self, n, seed=None):
    samples = true_normal_sample_n(self, n=n, seed=seed)
    data_container.append(samples)
    return samples

  distributions_py.Normal._sample_n = _capturing_normal_sample_n
  try:
    yield data_container
  finally:
    # BUGFIX: restore the real implementation even if the with-body
    # raises, so one failing test cannot leak the patch into later tests.
    distributions_py.Normal._sample_n = true_normal_sample_n
def make_univariate_mixture(batch_shape, num_components):
  """Build a Mixture of `num_components` scalar Normals with random params."""
  shape = list(batch_shape)
  # The logits are shifted to large negative values; softmax is
  # shift-invariant, so the mixture weights are unchanged.
  logits = random_ops.random_uniform(
      shape + [num_components], -1, 1, dtype=dtypes.float32) - 50.
  components = []
  for _ in range(num_components):
    components.append(
        distributions_py.Normal(
            mu=np.float32(np.random.randn(*shape)),
            sigma=np.float32(10 * np.random.rand(*shape))))
  cat = distributions_py.Categorical(logits, dtype=dtypes.int32)
  return distributions_py.Mixture(cat, components)
def make_multivariate_mixture(batch_shape, num_components, event_shape):
  """Build a Mixture of `num_components` diagonal MVNs with random params."""
  # Softmax is shift-invariant, so the -50 offset leaves weights unchanged.
  logits = random_ops.random_uniform(
      list(batch_shape) + [num_components], -1, 1, dtype=dtypes.float32) - 50.
  full_shape = list(batch_shape + event_shape)
  components = []
  for _ in range(num_components):
    components.append(
        distributions_py.MultivariateNormalDiag(
            mu=np.float32(np.random.randn(*full_shape)),
            diag_stdev=np.float32(10 * np.random.rand(*full_shape))))
  cat = distributions_py.Categorical(logits, dtype=dtypes.int32)
  return distributions_py.Mixture(cat, components)
class MixtureTest(test.TestCase):
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10)
self.assertAllEqual(batch_shape, dist.get_batch_shape())
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual([], dist.get_event_shape())
self.assertAllEqual([], dist.event_shape().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape)
self.assertAllEqual(batch_shape, dist.get_batch_shape())
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual(event_shape, dist.get_event_shape())
self.assertAllEqual(event_shape, dist.event_shape().eval())
  def testBrokenShapesStatic(self):
    """Statically detectable construction errors raise ValueError."""
    # One component but two categorical classes.
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             r"cat.num_classes != len"):
      distributions_py.Mixture(
          distributions_py.Categorical([0.1, 0.5]),  # 2 classes
          [distributions_py.Normal(
              mu=1.0, sigma=2.0)])
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"\(\) and \(2,\) are not compatible"):
      # The value error is raised because the batch shapes of the
      # Normals are not equal. One is a scalar, the other is a
      # vector of size (2,).
      distributions_py.Mixture(
          distributions_py.Categorical([-0.5, 0.5]),  # scalar batch
          [
              distributions_py.Normal(
                  mu=1.0, sigma=2.0),  # scalar dist
              distributions_py.Normal(
                  mu=[1.0, 1.0], sigma=[2.0, 2.0])
          ])
    # A fully-unknown number of classes cannot be validated at graph
    # construction time.
    with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
      cat_logits = array_ops.placeholder(shape=[1, None], dtype=dtypes.float32)
      distributions_py.Mixture(
          distributions_py.Categorical(cat_logits),
          [distributions_py.Normal(
              mu=[1.0], sigma=[2.0])])
  def testBrokenShapesDynamic(self):
    """Shape mismatches only visible at run time fail with validate_args."""
    with self.test_session():
      # Parameters are placeholders, so shapes are unknown until feed time.
      d0_param = array_ops.placeholder(dtype=dtypes.float32)
      d1_param = array_ops.placeholder(dtype=dtypes.float32)
      d = distributions_py.Mixture(
          distributions_py.Categorical([0.1, 0.2]), [
              distributions_py.Normal(
                  mu=d0_param, sigma=d0_param), distributions_py.Normal(
                      mu=d1_param, sigma=d1_param)
          ],
          validate_args=True)
      # Mismatched component batch shapes: [2] vs [1], then [2] vs scalar.
      with self.assertRaisesOpError(r"batch shape must match"):
        d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
      with self.assertRaisesOpError(r"batch shape must match"):
        d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
  def testBrokenTypes(self):
    """Invalid `cat`/`components` arguments raise TypeError/ValueError."""
    # `cat` must be a Categorical distribution.
    with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
      distributions_py.Mixture(None, [])
    cat = distributions_py.Categorical([0.3, 0.2])
    # components must be a list of distributions
    with self.assertRaisesWithPredicateMatch(
        TypeError, "all .* must be Distribution instances"):
      distributions_py.Mixture(cat, [None])
    # All components must share a single dtype (float32 vs float16 here).
    with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
      distributions_py.Mixture(
          cat, [
              distributions_py.Normal(
                  mu=[1.0], sigma=[2.0]), distributions_py.Normal(
                      mu=[np.float16(1.0)], sigma=[np.float16(2.0)])
          ])
    with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
      distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
    # Components must agree on continuity (Normal is continuous,
    # Bernoulli is not).
    with self.assertRaisesWithPredicateMatch(TypeError,
                                             "either be continuous or not"):
      distributions_py.Mixture(
          cat, [
              distributions_py.Normal(
                  mu=[1.0], sigma=[2.0]), distributions_py.Bernoulli(
                      dtype=dtypes.float32, logits=[1.0])
          ])
def testMeanUnivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
  def testMeanMultivariate(self):
    """Multivariate mixture mean equals the weighted component means."""
    with self.test_session() as sess:
      for batch_shape in ((), (2,), (2, 3)):
        dist = make_multivariate_mixture(
            batch_shape=batch_shape, num_components=2, event_shape=(4,))
        mean = dist.mean()
        self.assertEqual(batch_shape + (4,), mean.get_shape())
        cat_probs = nn_ops.softmax(dist.cat.logits)
        dist_means = [d.mean() for d in dist.components]
        mean_value, cat_probs_value, dist_means_value = sess.run(
            [mean, cat_probs, dist_means])
        self.assertEqual(batch_shape + (4,), mean_value.shape)
        # Move the component axis to the front to zip over components.
        cat_probs_value = _swap_first_last_axes(cat_probs_value)
        # Add a new innermost dimension for broadcasting to mvn vector shape
        cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
        true_mean = sum(
            [c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
        self.assertAllClose(true_mean, mean_value)
  def testProbScalarUnivariate(self):
    """Scalar-batch univariate pdf matches the weighted component pdfs."""
    with self.test_session() as sess:
      dist = make_univariate_mixture(batch_shape=[], num_components=2)
      # x covers vector, scalar, and matrix shapes.
      for x in [
          np.array(
              [1.0, 2.0], dtype=np.float32), np.array(
                  1.0, dtype=np.float32),
          np.random.randn(3, 4).astype(np.float32)
      ]:
        p_x = dist.prob(x)
        self.assertEqual(x.shape, p_x.get_shape())
        # With a scalar batch, softmax of the wrapped logits is a plain
        # vector indexed by component; no axis swap is needed.
        cat_probs = nn_ops.softmax([dist.cat.logits])[0]
        dist_probs = [d.prob(x) for d in dist.components]
        p_x_value, cat_probs_value, dist_probs_value = sess.run(
            [p_x, cat_probs, dist_probs])
        self.assertEqual(x.shape, p_x_value.shape)
        total_prob = sum(c_p_value * d_p_value
                         for (c_p_value, d_p_value
                              ) in zip(cat_probs_value, dist_probs_value))
        self.assertAllClose(total_prob, p_x_value)
  def testProbScalarMultivariate(self):
    """Scalar-batch multivariate pdf matches the weighted component pdfs."""
    with self.test_session() as sess:
      dist = make_multivariate_mixture(
          batch_shape=[], num_components=2, event_shape=[3])
      # x covers matrix, vector, and higher-rank event-carrying shapes.
      for x in [
          np.array(
              [[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
                  [-1.0, 0.0, 1.0], dtype=np.float32),
          np.random.randn(2, 2, 3).astype(np.float32)
      ]:
        p_x = dist.prob(x)
        # The last axis of x is the event and is consumed by prob().
        self.assertEqual(x.shape[:-1], p_x.get_shape())
        cat_probs = nn_ops.softmax([dist.cat.logits])[0]
        dist_probs = [d.prob(x) for d in dist.components]
        p_x_value, cat_probs_value, dist_probs_value = sess.run(
            [p_x, cat_probs, dist_probs])
        self.assertEqual(x.shape[:-1], p_x_value.shape)
        total_prob = sum(c_p_value * d_p_value
                         for (c_p_value, d_p_value
                              ) in zip(cat_probs_value, dist_probs_value))
        self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2)
for x in [
np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4])
for x in [
np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
  def testSampleScalarBatchUnivariate(self):
    """Samples of a scalar-batch univariate mixture come from its components.

    Captures the per-component Normal sample tensors created while the
    mixture's sample op is built, then checks that the mixture's output
    equals the component draws selected by the categorical picks.
    NOTE(review): assumes `dist.cat.sample` with the same seed reproduces
    the selections used inside `dist.sample` — confirm against
    Mixture.sample's seed handling.
    """
    with self.test_session() as sess:
      num_components = 3
      dist = make_univariate_mixture(
          batch_shape=[], num_components=num_components)
      n = 4
      # Capture each Normal component's sample tensor as it is created.
      with _test_capture_normal_sample_outputs() as component_samples:
        samples = dist.sample(n, seed=123)
      self.assertEqual(samples.dtype, dtypes.float32)
      self.assertEqual((4,), samples.get_shape())
      cat_samples = dist.cat.sample(n, seed=123)
      sample_values, cat_sample_values, dist_sample_values = sess.run(
          [samples, cat_samples, component_samples])
      self.assertEqual((4,), sample_values.shape)
      # For each component, the mixture entries selected for it must match
      # a prefix of that component's captured draws.
      for c in range(num_components):
        which_c = np.where(cat_sample_values == c)[0]
        size_c = which_c.size
        # Scalar Batch univariate case: batch_size == 1, rank 1
        which_dist_samples = dist_sample_values[c][:size_c]
        self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.test_session():
n = 100
random_seed.set_random_seed(654321)
components = [
distributions_py.Normal(
mu=mu, sigma=sigma) for mu, sigma in zip(mus, sigmas)
]
cat = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat1")
dist1 = distributions_py.Mixture(cat, components, name="mixture1")
samples1 = dist1.sample(n, seed=123456).eval()
random_seed.set_random_seed(654321)
components2 = [
distributions_py.Normal(
mu=mu, sigma=sigma) for mu, sigma in zip(mus, sigmas)
]
cat2 = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat2")
dist2 = distributions_py.Mixture(cat2, components2, name="mixture2")
samples2 = dist2.sample(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
  def testSampleScalarBatchMultivariate(self):
    """Samples of a scalar-batch multivariate mixture come from components.

    Same structure as the univariate case, but each draw is a length-2
    event vector from an MVNDiag component.
    NOTE(review): assumes `dist.cat.sample` with the same seed reproduces
    the selections used inside `dist.sample` — confirm.
    """
    with self.test_session() as sess:
      num_components = 3
      dist = make_multivariate_mixture(
          batch_shape=[], num_components=num_components, event_shape=[2])
      n = 4
      # Capture each MVNDiag component's sample tensor as it is created.
      with _test_capture_mvndiag_sample_outputs() as component_samples:
        samples = dist.sample(n, seed=123)
      self.assertEqual(samples.dtype, dtypes.float32)
      self.assertEqual((4, 2), samples.get_shape())
      cat_samples = dist.cat.sample(n, seed=123)
      sample_values, cat_sample_values, dist_sample_values = sess.run(
          [samples, cat_samples, component_samples])
      self.assertEqual((4, 2), sample_values.shape)
      for c in range(num_components):
        which_c = np.where(cat_sample_values == c)[0]
        size_c = which_c.size
        # Scalar Batch multivariate case: batch_size == 1, rank 2
        which_dist_samples = dist_sample_values[c][:size_c, :]
        self.assertAllClose(which_dist_samples, sample_values[which_c, :])
  def testSampleBatchUnivariate(self):
    """Samples of a batched univariate mixture come from its components.

    With a [2, 3] batch, the categorical picks are located per batch cell
    via np.where, and the matching component draws are gathered by
    (sample, batch0, batch1) index triples.
    NOTE(review): assumes `dist.cat.sample` with the same seed reproduces
    the selections used inside `dist.sample` — confirm.
    """
    with self.test_session() as sess:
      num_components = 3
      dist = make_univariate_mixture(
          batch_shape=[2, 3], num_components=num_components)
      n = 4
      # Capture each Normal component's sample tensor as it is created.
      with _test_capture_normal_sample_outputs() as component_samples:
        samples = dist.sample(n, seed=123)
      self.assertEqual(samples.dtype, dtypes.float32)
      self.assertEqual((4, 2, 3), samples.get_shape())
      cat_samples = dist.cat.sample(n, seed=123)
      sample_values, cat_sample_values, dist_sample_values = sess.run(
          [samples, cat_samples, component_samples])
      self.assertEqual((4, 2, 3), sample_values.shape)
      for c in range(num_components):
        # Indices (sample, batch-row, batch-col) where component c was picked.
        which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
        size_c = which_c_s.size
        # Batch univariate case: batch_size == [2, 3], rank 3
        which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
                                                   which_c_b1]
        self.assertAllClose(which_dist_samples,
                            sample_values[which_c_s, which_c_b0, which_c_b1])
  def testSampleBatchMultivariate(self):
    """Samples of a batched multivariate mixture come from its components.

    Like testSampleBatchUnivariate, but each gathered draw keeps its
    trailing length-4 event dimension.
    NOTE(review): assumes `dist.cat.sample` with the same seed reproduces
    the selections used inside `dist.sample` — confirm.
    """
    with self.test_session() as sess:
      num_components = 3
      dist = make_multivariate_mixture(
          batch_shape=[2, 3], num_components=num_components, event_shape=[4])
      n = 5
      # Capture each MVNDiag component's sample tensor as it is created.
      with _test_capture_mvndiag_sample_outputs() as component_samples:
        samples = dist.sample(n, seed=123)
      self.assertEqual(samples.dtype, dtypes.float32)
      self.assertEqual((5, 2, 3, 4), samples.get_shape())
      cat_samples = dist.cat.sample(n, seed=123)
      sample_values, cat_sample_values, dist_sample_values = sess.run(
          [samples, cat_samples, component_samples])
      self.assertEqual((5, 2, 3, 4), sample_values.shape)
      for c in range(num_components):
        # Indices (sample, batch-row, batch-col) where component c was picked.
        which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
        size_c = which_c_s.size
        # Batch multivariate case: batch_size == [2, 3], rank 4 (multivariate)
        which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
                                                   which_c_b1, :]
        self.assertAllClose(which_dist_samples,
                            sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testEntropyLowerBoundMultivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,))
entropy_lower_bound = dist.entropy_lower_bound()
self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_entropy = [d.entropy() for d in dist.components]
entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# entropy_lower_bound = sum_i pi_i entropy_i
# for i in num_components, batchwise.
true_entropy_lower_bound = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
class MixtureBenchmark(test.Benchmark):
  """Benchmarks sampling speed of Mixture distributions on CPU and GPU.

  Each benchmark* method prints a tab-separated header followed by one
  result row per (use_gpu, components, batch, features, sample) setting.
  """

  def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
                            num_components, batch_size, num_features,
                            sample_size):
    """Times one mixture sample op built by `create_distribution`.

    Args:
      name: Label used in the reported benchmark name.
      create_distribution: Callable taking keyword arguments
        (num_components, batch_size, num_features) and returning a Mixture.
      use_gpu: Whether to place ops on "/gpu:0" (else "/cpu:0").
      num_components: Number of mixture components.
      batch_size: Batch size of the distribution's parameters.
      num_features: Event dimensionality of each component.
      sample_size: Number of samples drawn by the timed op.
    """
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = True
    # Fixed seeds keep the generated parameters identical across runs.
    np.random.seed(127)
    with session.Session(config=config, graph=ops.Graph()) as sess:
      random_seed.set_random_seed(0)
      with ops.device("/gpu:0" if use_gpu else "/cpu:0"):
        mixture = create_distribution(
            num_components=num_components,
            batch_size=batch_size,
            num_features=num_features)
        # Benchmark the op, not the fetched tensor, so no transfer cost
        # is included in the timing.
        sample_op = mixture.sample(sample_size).op
        sess.run(variables.global_variables_initializer())
        reported = self.run_op_benchmark(
            sess,
            sample_op,
            min_iters=10,
            name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
                  (name, use_gpu, num_components, batch_size, num_features,
                   sample_size)))
        # One tab-separated row matching the header printed by the caller.
        print("\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) %
              (use_gpu, num_components, batch_size, num_features, sample_size,
               reported["wall_time"]))

  def benchmarkSamplingMVNDiag(self):
    """Benchmarks mixtures of diagonal-covariance multivariate normals."""
    print("mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")

    def create_distribution(batch_size, num_components, num_features):
      # Random categorical weights plus per-component mean/stddev Variables
      # (Variables so parameter generation is excluded from the timed op).
      cat = distributions_py.Categorical(
          logits=np.random.randn(batch_size, num_components))
      mus = [
          variables.Variable(np.random.randn(batch_size, num_features))
          for _ in range(num_components)
      ]
      sigmas = [
          variables.Variable(np.random.rand(batch_size, num_features))
          for _ in range(num_components)
      ]
      components = list(
          distributions_py.MultivariateNormalDiag(
              mu=mu, diag_stdev=sigma) for (mu, sigma) in zip(mus, sigmas))
      return distributions_py.Mixture(cat, components)

    for use_gpu in False, True:
      if use_gpu and not test.is_gpu_available():
        continue
      for num_components in 1, 8, 16:
        for batch_size in 1, 32:
          for num_features in 1, 64, 512:
            for sample_size in 1, 32, 128:
              self._runSamplingBenchmark(
                  "mvn_diag",
                  create_distribution=create_distribution,
                  use_gpu=use_gpu,
                  num_components=num_components,
                  batch_size=batch_size,
                  num_features=num_features,
                  sample_size=sample_size)

  def benchmarkSamplingMVNFull(self):
    """Benchmarks mixtures of full-covariance multivariate normals."""
    print("mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")

    def psd(x):
      """Construct batch-wise PSD matrices."""
      return np.stack([np.dot(np.transpose(z), z) for z in x])

    def create_distribution(batch_size, num_components, num_features):
      # As above, but each component carries a full (PSD) covariance matrix.
      cat = distributions_py.Categorical(
          logits=np.random.randn(batch_size, num_components))
      mus = [
          variables.Variable(np.random.randn(batch_size, num_features))
          for _ in range(num_components)
      ]
      sigmas = [
          variables.Variable(
              psd(np.random.rand(batch_size, num_features, num_features)))
          for _ in range(num_components)
      ]
      components = list(
          distributions_py.MultivariateNormalFull(
              mu=mu, sigma=sigma) for (mu, sigma) in zip(mus, sigmas))
      return distributions_py.Mixture(cat, components)

    for use_gpu in False, True:
      if use_gpu and not test.is_gpu_available():
        continue
      for num_components in 1, 8, 16:
        for batch_size in 1, 32:
          for num_features in 1, 64, 512:
            for sample_size in 1, 32, 128:
              self._runSamplingBenchmark(
                  "mvn_full",
                  create_distribution=create_distribution,
                  use_gpu=use_gpu,
                  num_components=num_components,
                  batch_size=batch_size,
                  num_features=num_features,
                  sample_size=sample_size)
if __name__ == "__main__":
  # Runs the unit tests (and the benchmarks when invoked with --benchmarks).
  test.main()
|
|
import json
import requests
from io import BytesIO
from time import sleep, time
from zipfile import ZipFile, BadZipfile
from logging import getLogger
from requests.exceptions import RequestException
try:
from types import NoneType
except ImportError:
NoneType = type(None)
from .exceptions import BadCredentials, BadRequest
log = getLogger(__name__)
class Client(object):
    """Thin client for the OANDA REST API (v1).

    Conventions used throughout: every public endpoint method returns the
    decoded JSON payload on success and ``False`` when the request fails at
    the transport level (``RequestException``) or yields an empty /
    unexpected response (``AssertionError``).  Errors the server reports
    explicitly are raised as :class:`BadRequest`.
    """

    API_VERSION = "v1"

    def __init__(
        self,
        environment,
        account_id=None,
        access_token=None,
        json_options=None
    ):
        """Store connection settings and optionally verify credentials.

        Parameters
        ----------
        environment : tuple
            Pair of (REST API domain, streaming API domain).
        account_id : int or str
            Account to operate on.  When given, the credentials are
            verified immediately and BadCredentials is raised on failure.
        access_token : str
            OAuth bearer token; sent with every request when provided.
        json_options : dict
            Extra keyword arguments forwarded to JSON decoding
            (e.g. ``parse_float``).
        """
        self.domain, self.domain_stream = environment
        self.access_token = access_token
        self.account_id = account_id
        self.json_options = json_options or {}
        if account_id and not self.get_credentials():
            raise BadCredentials()

    def get_credentials(self):
        """Fetch the account record; return it, or False on failure.

        See more: http://developer.oanda.com/rest-live/accounts/
        """
        url = "{0}/{1}/accounts/{2}".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        try:
            response = self.__call(uri=url)
            assert len(response) > 0
            return response
        except (RequestException, AssertionError):
            return False

    def __get_response(self, uri, params=None, method="get", stream=False):
        """Create a response object with the given params and options.

        Parameters
        ----------
        uri : string
            The full URL to request.
        params: dict
            Parameters to send with the request. They are sent as data
            for methods that accept a request body and otherwise as
            query parameters.
        method : str
            The HTTP method to use.
        stream : bool
            Whether to stream the response.

        Returns a requests.Response object.
        """
        # Lazily create and reuse a single Session so keep-alive
        # connections and the Authorization header persist across calls.
        if not hasattr(self, "session") or not self.session:
            self.session = requests.Session()
            if self.access_token:
                self.session.headers.update(
                    {'Authorization': 'Bearer {}'.format(self.access_token)}
                )

        # Remove parameters the caller left unset.
        if params:
            params = {k: v for k, v in params.items() if v is not None}

        kwargs = {
            "url": uri,
            "verify": True,
            "stream": stream
        }
        # GET sends query parameters; other verbs send a request body.
        kwargs["params" if method == "get" else "data"] = params

        return getattr(self.session, method)(**kwargs)

    def __call(self, uri, params=None, method="get"):
        """Perform a request and return only the decoded JSON body.

        Raises BadRequest on HTTP error statuses and on undecodable or
        failed responses.
        """
        try:
            resp = self.__get_response(uri, params, method, False)
            rjson = resp.json(**self.json_options)
            assert resp.ok
        except AssertionError:
            # The server answered but flagged an error; surface its
            # message (``None`` when the body carries no "message" key).
            msg = "OCode-{}: {}".format(
                resp.status_code, rjson.get("message"))
            raise BadRequest(msg)
        except Exception as e:
            msg = "Bad response: {}".format(e)
            log.error(msg, exc_info=True)
            raise BadRequest(msg)
        else:
            return rjson

    def __call_stream(self, uri, params=None, method="get"):
        """Perform a request and return the raw streaming response.

        Raises BadRequest when the server answers with an error status;
        returns None (after logging) when the request itself fails.
        """
        try:
            resp = self.__get_response(uri, params, method, True)
            assert resp.ok
        except AssertionError:
            raise BadRequest(resp.status_code)
        except Exception as e:
            log.error("Bad response: {}".format(e), exc_info=True)
        else:
            return resp

    def __safe_call(self, uri, params=None, method="get"):
        """Call the API, mapping transport/assertion errors to False.

        Shared wrapper for the public endpoint methods, which report
        failures as a False return instead of raising.
        """
        try:
            return self.__call(uri=uri, params=params, method=method)
        except (RequestException, AssertionError):
            return False

    def get_instruments(self):
        """Return the list of tradable instruments for the account.

        See more:
        http://developer.oanda.com/rest-live/rates/#getInstrumentList
        """
        url = "{0}/{1}/instruments".format(self.domain, self.API_VERSION)
        params = {"accountId": self.account_id}
        try:
            response = self.__call(uri=url, params=params)
            assert len(response) > 0
            return response
        except (RequestException, AssertionError):
            return False

    def get_prices(self, instruments, stream=True):
        """Return current prices, streamed or as a single snapshot.

        See more:
        http://developer.oanda.com/rest-live/rates/#getCurrentPrices
        """
        # Streaming prices use the dedicated streaming domain.
        url = "{0}/{1}/prices".format(
            self.domain_stream if stream else self.domain,
            self.API_VERSION
        )
        params = {"accountId": self.account_id, "instruments": instruments}
        try:
            if stream:
                return self.__call_stream(
                    uri=url, params=params, method="get")
            return self.__call(uri=url, params=params, method="get")
        except (RequestException, AssertionError):
            return False

    def get_instrument_history(self, instrument, candle_format="bidask",
                               granularity='S5', count=500,
                               daily_alignment=None, alignment_timezone=None,
                               weekly_alignment="Monday", start=None,
                               end=None):
        """Retrieve historical candles for an instrument.

        See more:
        http://developer.oanda.com/rest-live/rates/#retrieveInstrumentHistory
        """
        url = "{0}/{1}/candles".format(self.domain, self.API_VERSION)
        params = {
            "accountId": self.account_id,
            "instrument": instrument,
            "candleFormat": candle_format,
            "granularity": granularity,
            "count": count,
            "dailyAlignment": daily_alignment,
            "alignmentTimezone": alignment_timezone,
            "weeklyAlignment": weekly_alignment,
            "start": start,
            "end": end,
        }
        return self.__safe_call(url, params=params, method="get")

    def get_orders(self, instrument=None, count=50):
        """List pending orders for the account.

        See more:
        http://developer.oanda.com/rest-live/orders/#getOrdersForAnAccount
        """
        url = "{0}/{1}/accounts/{2}/orders".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        params = {"instrument": instrument, "count": count}
        return self.__safe_call(url, params=params, method="get")

    def get_order(self, order_id):
        """Get information for a single order.

        See more:
        http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder
        """
        url = "{0}/{1}/accounts/{2}/orders/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            order_id
        )
        return self.__safe_call(url, method="get")

    def create_order(self, order):
        """Submit a new order built from the given order object.

        See more:
        http://developer.oanda.com/rest-live/orders/#createNewOrder
        """
        url = "{0}/{1}/accounts/{2}/orders".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        # The order object's attributes map directly onto API fields.
        return self.__safe_call(url, params=order.__dict__, method="post")

    def update_order(self, order_id, order):
        """Modify an existing order.

        See more:
        http://developer.oanda.com/rest-live/orders/#modifyExistingOrder
        """
        url = "{0}/{1}/accounts/{2}/orders/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            order_id
        )
        return self.__safe_call(url, params=order.__dict__, method="patch")

    def close_order(self, order_id):
        """Cancel (close) a pending order.

        See more:
        http://developer.oanda.com/rest-live/orders/#closeOrder
        """
        url = "{0}/{1}/accounts/{2}/orders/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            order_id
        )
        return self.__safe_call(url, method="delete")

    def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
        """ Get a list of open trades

        Parameters
        ----------
        max_id : int
            The server will return trades with id less than or equal
            to this, in descending order (for pagination)
        count : int
            Maximum number of open trades to return. Default: 50 Max
            value: 500
        instrument : str
            Retrieve open trades for a specific instrument only
            Default: all
        ids : list
            A list of trades to retrieve. Maximum number of ids: 50.
            No other parameter may be specified with the ids
            parameter.

        See more:
        http://developer.oanda.com/rest-live/trades/#getListOpenTrades
        """
        url = "{0}/{1}/accounts/{2}/trades".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        params = {
            "maxId": int(max_id) if max_id and max_id > 0 else None,
            "count": int(count) if count and count > 0 else None,
            "instrument": instrument,
            "ids": ','.join(ids) if ids else None
        }
        return self.__safe_call(url, params=params, method="get")

    def get_trade(self, trade_id):
        """ Get information on a specific trade.

        Parameters
        ----------
        trade_id : int
            The id of the trade to get information on.

        See more:
        http://developer.oanda.com/rest-live/trades/#getInformationSpecificTrade
        """
        url = "{0}/{1}/accounts/{2}/trades/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            trade_id
        )
        return self.__safe_call(url, method="get")

    def update_trade(
        self,
        trade_id,
        stop_loss=None,
        take_profit=None,
        trailing_stop=None
    ):
        """ Modify an existing trade.

        Note: Only the specified parameters will be modified. All
        other parameters will remain unchanged. To remove an
        optional parameter, set its value to 0.

        Parameters
        ----------
        trade_id : int
            The id of the trade to modify.
        stop_loss : number
            Stop Loss value.
        take_profit : number
            Take Profit value.
        trailing_stop : number
            Trailing Stop distance in pips, up to one decimal place

        See more:
        http://developer.oanda.com/rest-live/trades/#modifyExistingTrade
        """
        url = "{0}/{1}/accounts/{2}/trades/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            trade_id
        )
        params = {
            "stopLoss": stop_loss,
            "takeProfit": take_profit,
            "trailingStop": trailing_stop
        }
        return self.__safe_call(url, params=params, method="patch")

    def close_trade(self, trade_id):
        """ Close an open trade.

        Parameters
        ----------
        trade_id : int
            The id of the trade to close.

        See more:
        http://developer.oanda.com/rest-live/trades/#closeOpenTrade
        """
        url = "{0}/{1}/accounts/{2}/trades/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            trade_id
        )
        return self.__safe_call(url, method="delete")

    def get_positions(self):
        """ Get a list of all open positions.

        See more:
        http://developer.oanda.com/rest-live/positions/#getListAllOpenPositions
        """
        url = "{0}/{1}/accounts/{2}/positions".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        return self.__safe_call(url, method="get")

    def get_position(self, instrument):
        """ Get the position for an instrument.

        Parameters
        ----------
        instrument : string
            The instrument to get the open position for.

        See more:
        http://developer.oanda.com/rest-live/positions/#getPositionForInstrument
        """
        url = "{0}/{1}/accounts/{2}/positions/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            instrument
        )
        return self.__safe_call(url, method="get")

    def close_position(self, instrument):
        """ Close an existing position

        Parameters
        ----------
        instrument : string
            The instrument to close the position for.

        See more:
        http://developer.oanda.com/rest-live/positions/#closeExistingPosition
        """
        url = "{0}/{1}/accounts/{2}/positions/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            instrument
        )
        return self.__safe_call(url, method="delete")

    def get_transactions(
        self,
        max_id=None,
        count=None,
        instrument="all",
        ids=None
    ):
        """ Get a list of transactions.

        Parameters
        ----------
        max_id : int
            The server will return transactions with id less than or
            equal to this, in descending order (for pagination).
        count : int
            Maximum number of open transactions to return. Default:
            50. Max value: 500.
        instrument : str
            Retrieve open transactions for a specific instrument
            only. Default: all.
        ids : list
            A list of transactions to retrieve. Maximum number of
            ids: 50. No other parameter may be specified with the
            ids parameter.

        See more:
        http://developer.oanda.com/rest-live/transaction-history/#getTransactionHistory
        http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
        """
        url = "{0}/{1}/accounts/{2}/transactions".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        params = {
            "maxId": int(max_id) if max_id and max_id > 0 else None,
            "count": int(count) if count and count > 0 else None,
            "instrument": instrument,
            "ids": ','.join(ids) if ids else None
        }
        return self.__safe_call(url, params=params, method="get")

    def get_transaction(self, transaction_id):
        """ Get information on a specific transaction.

        Parameters
        ----------
        transaction_id : int
            The id of the transaction to get information on.

        See more:
        http://developer.oanda.com/rest-live/transaction-history/#getInformationForTransaction
        http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
        """
        url = "{0}/{1}/accounts/{2}/transactions/{3}".format(
            self.domain,
            self.API_VERSION,
            self.account_id,
            transaction_id
        )
        return self.__safe_call(url, method="get")

    def request_transaction_history(self):
        """ Request full account history.

        Submit a request for a full transaction history. A
        successfully accepted submission results in a response
        containing a URL in the Location header to a file that will
        be available once the request is served. Response for the
        URL will be HTTP 404 until the file is ready. Once served
        the URL will be valid for a certain amount of time.

        See more:
        http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
        http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
        """
        url = "{0}/{1}/accounts/{2}/alltransactions".format(
            self.domain,
            self.API_VERSION,
            self.account_id
        )
        try:
            resp = self.__get_response(url)
            return resp.headers['location']
        except (RequestException, AssertionError):
            return False

    def get_transaction_history(self, max_wait=5.0):
        """ Download full account history.

        Uses request_transaction_history to get the transaction
        history URL, then polls the given URL until it's ready (or
        the max_wait time is reached) and provides the decoded
        response.

        Parameters
        ----------
        max_wait : float
            The total maximum time to spend waiting for the file to
            be ready; if this is exceeded a failed response will be
            returned. This is not guaranteed to be strictly
            followed, as one last attempt will be made to check the
            file before giving up.

        See more:
        http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
        http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
        """
        url = self.request_transaction_history()
        if not url:
            return False

        # Poll with exponential backoff; the delay is capped by the
        # remaining budget and the loop ends once it reaches zero.
        ready = False
        start = time()
        delay = 0.1
        while not ready and delay:
            response = requests.head(url)
            ready = response.ok
            if not ready:
                sleep(delay)
                time_remaining = max_wait - time() + start
                max_delay = max(0., time_remaining - .1)
                delay = min(delay * 2, max_delay)
        if not ready:
            return False

        # The history is delivered as a ZIP archive holding one JSON file.
        response = requests.get(url)
        try:
            with ZipFile(BytesIO(response.content)) as container:
                files = container.namelist()
                if not files:
                    log.error('Transaction ZIP has no files.')
                    return False
                history = container.open(files[0])
                raw = history.read().decode('ascii')
        except BadZipfile:
            log.error('Response is not a valid ZIP file', exc_info=True)
            return False
        return json.loads(raw, **self.json_options)

    def create_account(self, currency=None):
        """ Create a new account.

        This call is only available on the sandbox system. Please
        create accounts on fxtrade.oanda.com on our production
        system.

        See more:
        http://developer.oanda.com/rest-sandbox/accounts/#-a-name-createtestaccount-a-create-a-test-account
        """
        url = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
        params = {"currency": currency}
        return self.__safe_call(url, params=params, method="post")

    def get_accounts(self, username=None):
        """ Get a list of accounts owned by the user.

        Parameters
        ----------
        username : string
            The name of the user. Note: This is only required on the
            sandbox, on production systems your access token will
            identify you.

        See more:
        http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
        """
        url = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
        params = {"username": username}
        return self.__safe_call(url, params=params, method="get")
|
|
# -*- coding: utf-8 -*-
# -- Dual Licence ----------------------------------------------------------
############################################################################
# GPL License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
# --------------------------------------------------------------------------
############################################################################
# BSD 3-Clause License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are #
# met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
############################################################################
# The Unpack Builder can be used for unpacking archives (eg Zip, TGZ, BZ, ... ).
# The emitter of the Builder reads the archive data and creates a returning file list
# the builder extract the archive. The environment variable stores a dictionary "UNPACK"
# for set different extractions (subdict "EXTRACTOR"):
# {
# PRIORITY => a value for setting the extractor order (lower numbers = extractor is used earlier)
# SUFFIX => defines a list with file suffixes, which should be handled with this extractor
# EXTRACTSUFFIX => suffix of the extract command
# EXTRACTFLAGS => a string parameter for the RUN command for extracting the data
# EXTRACTCMD => full extract command of the builder
# RUN => the main program which will be started (if the parameter is empty, the extractor will be ignored)
# LISTCMD => the listing command for the emitter
# LISTFLAGS => the string options for the RUN command for showing a list of files
# LISTSUFFIX => suffix of the list command
#               LISTEXTRACTOR => an optional Python function that is called on each output line of the
#                                LISTCMD for extracting file & dir names; the function takes two parameters (first the line number,
#                                second the line content) and must return a string with the file / dir path (other value types
#                                will be ignored)
# }
# Other options in the UNPACK dictionary are:
#   STOPONEMPTYFILE => bool variable for stopping if the file is empty (default True)
# VIWEXTRACTOUTPUT => shows the output messages of the extraction command (default False)
# EXTRACTDIR => path in that the data will be extracted (default #)
#
# Each file is handled by the first extractor whose suffix matches; the extractor list can be extended for other file types.
# The order of the extractor dictionary determines the listing & extraction command, e.g. the file extension .tar.gz should be
# checked before .gz, because a .tar.gz archive is extracted in one shot.
#
# Under *nix system these tools are supported: tar, bzip2, gzip, unzip
# Under Windows only 7-Zip (http://www.7-zip.org/) is supported
import subprocess, os
import SCons.Errors, SCons.Warnings
import SCons.Util
# enables Scons warning for this builder
class UnpackWarning(SCons.Warnings.Warning) :
    """Warning category emitted by the Unpack builder."""
    pass
# register the class so SCons --warn options can enable/suppress it
SCons.Warnings.enableWarningClass(UnpackWarning)
# extractor function for Tar output
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_tar( env, count, no, i ) :
    """Extract the archived path from one line of tar's listing output.

    The path is the final whitespace-separated token of every line.
    """
    fields = i.split()
    return fields[-1]
# extractor function for GZip output,
# ignore the first line
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_gzip( env, count, no, i ) :
    """Extract the file path from one line of gzip's listing output.

    The first line is a column header and is skipped; on data lines the
    path is the final whitespace-separated token.
    """
    return None if no == 0 else i.split()[-1]
# extractor function for Unzip output,
# ignore the first & last two lines
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_unzip(env, count, no, i):
    """Pull the file path out of one `unzip -l` output line.

    The first three lines and the final two lines of the listing are
    header/footer decoration and yield None; data lines return their
    last whitespace-separated field.
    """
    if 3 <= no < count - 2:
        return i.split()[-1]
    return None
# extractor function for 7-Zip
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_win_7zip(env, count, no, i):
    """Pull the file path out of one `7z l` output line.

    Lines 0-8 are the 7-Zip banner/header and the last two lines are the
    summary footer; those yield None.  Data lines return their last
    whitespace-separated field.
    """
    if 8 < no < count - 2:
        return i.split()[-1]
    return None
# returns the extractor item for handling the source file
# @param source input source file
# @param env environment object
# @return extractor entry or None on non existing
def __getExtractor(source, env):
    """Return the extractor entry that handles ``source[0]``, or None.

    Entries are tried in (PRIORITY, name) order so that compound suffixes
    (e.g. ".tar.gz") are matched before their shorter variants (".gz").
    Each candidate entry is validated first.

    @param source : builder source node list (source[0] is the archive)
    @param env    : construction environment carrying env["UNPACK"]
    @raises SCons.Errors.StopError : when a matching entry is malformed
    """
    # dict.items() with an explicit key function works on Python 2 and 3 alike,
    # unlike the former iteritems() / tuple-unpacking lambda (Py2-only).
    ranked = sorted(env["UNPACK"]["EXTRACTOR"].items(),
                    key=lambda kv: (kv[1]["PRIORITY"], kv[0]))
    for unpackername, extractor in ranked:
        # Validate the entry's fields before using it.
        # (Message fixed: this check is about the *run* command, not the list command.)
        if not SCons.Util.is_String(extractor["RUN"]):
            raise SCons.Errors.StopError("run command of the unpack builder for [%s] archives is not a string" % (unpackername))
        if not len(extractor["RUN"]):
            raise SCons.Errors.StopError("run command of the unpack builder for [%s] archives is not set - can not extract files" % (unpackername))
        if not SCons.Util.is_String(extractor["LISTFLAGS"]):
            raise SCons.Errors.StopError("list flags of the unpack builder for [%s] archives is not a string" % (unpackername))
        if not SCons.Util.is_String(extractor["LISTCMD"]):
            raise SCons.Errors.StopError("list command of the unpack builder for [%s] archives is not a string" % (unpackername))
        if not SCons.Util.is_String(extractor["EXTRACTFLAGS"]):
            raise SCons.Errors.StopError("extract flags of the unpack builder for [%s] archives is not a string" % (unpackername))
        if not SCons.Util.is_String(extractor["EXTRACTCMD"]):
            raise SCons.Errors.StopError("extract command of the unpack builder for [%s] archives is not a string" % (unpackername))
        if not SCons.Util.is_List(extractor["SUFFIX"]):
            raise SCons.Errors.StopError("suffix list of the unpack builder for [%s] archives is not a list" % (unpackername))
        # First suffix match wins (case-insensitive compare on the tail of the name).
        for suffix in extractor["SUFFIX"]:
            if str(source[0]).lower()[-len(suffix):] == suffix.lower():
                return extractor
    return None
# creates the extracter output message
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message(s, target, source, env):
    """PRINT_CMD_LINE_FUNC hook: announce which archive is being extracted.

    @param s      : original command-line message (ignored)
    @param target : target node list (ignored)
    @param source : source node list; source[0] is the archive being unpacked
    @param env    : construction environment (ignored)
    """
    # print() with a single argument behaves identically on Python 2 and 3,
    # unlike the former Py2-only `print "..."` statement.
    print("extract [%s] ..." % (source[0]))
# action function for extracting of the data
# @param target target packed file
# @param source extracted files
# @env environment object
def __action(target, source, env):
    """Builder action: run the extractor command for the source archive.

    @param target : target packed file
    @param source : extracted files
    @param env    : construction environment
    @raises SCons.Errors.StopError  : no extractor matches / extract command empty
    @raises SCons.Errors.BuildError : extractor process exits non-zero
    """
    # NOTE(review): `File` is not defined in this module's scope -- presumably
    # the SConscript global is expected here; confirm this code path is reachable.
    extractor = __getExtractor([File(source)], env)
    if not extractor:
        raise SCons.Errors.StopError("can not find any extractor value for the source file [%s]" % (source[0]))
    # if the extract command is empty, we create an error
    if len(extractor["EXTRACTCMD"]) == 0:
        raise SCons.Errors.StopError("the extractor command for the source file [%s] is empty" % (source[0]))
    # build it now (we need the shell, because some programs need it)
    cmd = env.subst(extractor["EXTRACTCMD"], source=source, target=target)
    if env["UNPACK"]["VIWEXTRACTOUTPUT"]:
        handle = subprocess.Popen(cmd, shell=True)
    else:
        # silence the extractor unless VIWEXTRACTOUTPUT is requested
        devnull = open(os.devnull, "wb")
        handle = subprocess.Popen(cmd, shell=True, stdout=devnull)
    # `!=` replaces the Python-2-only `<>` operator (same semantics).
    if handle.wait() != 0:
        raise SCons.Errors.BuildError("error running extractor [%s] on the source [%s]" % (cmd, source[0]))
# emitter function for getting the files
# within the archive
# @param target target packed file
# @param source extracted files
# @env environment object
def __emitter(target, source, env):
    """Builder emitter: compute the file list contained in the archive.

    Uses env["UNPACKLIST"] verbatim when set (for non-physical sources such
    as downloaded archives), otherwise runs the extractor's LISTCMD and
    parses its output with LISTEXTRACTOR.

    @param target : target packed file
    @param source : extracted files
    @param env    : construction environment
    @return (target, source) tuple for SCons
    """
    extractor = __getExtractor(source, env)
    if not extractor:
        raise SCons.Errors.StopError("can not find any extractor value for the source file [%s]" % (source[0]))
    # In some cases there is no physical file yet (e.g. a downloaded archive),
    # so the target list cannot be obtained via LISTCMD; the user-supplied
    # UNPACKLIST is injected as the target list instead.
    # (`in` replaces the Python-2-only dict/Environment .has_key().)
    if "UNPACKLIST" in env:
        if not SCons.Util.is_List(env["UNPACKLIST"]) and not SCons.Util.is_String(env["UNPACKLIST"]):
            raise SCons.Errors.StopError("manual target list of [%s] must be a string or list" % (source[0]))
        if not env["UNPACKLIST"]:
            raise SCons.Errors.StopError("manual target list of [%s] need not be empty" % (source[0]))
        return env["UNPACKLIST"], source
    # the source file must exist, because its listing is about to be read
    if not source[0].exists():
        raise SCons.Errors.StopError("source file [%s] must be exist" % (source[0]))
    # run the list command in a shell and capture its stdout
    cmd = env.subst(extractor["LISTCMD"], source=source, target=target)
    handle = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    target = handle.stdout.readlines()
    handle.communicate()
    # `!=` replaces the Python-2-only `<>` operator
    if handle.returncode != 0:
        raise SCons.Errors.StopError("error on running list command [%s] of the source file [%s]" % (cmd, source[0]))
    # Run LISTEXTRACTOR on each output line; keep only string results
    # (list comprehensions replace the Py2-only filter()-returns-list idiom).
    try:
        if callable(extractor["LISTEXTRACTOR"]):
            extracted = [extractor["LISTEXTRACTOR"](env, len(target), no, i) for no, i in enumerate(target)]
            target = [s for s in extracted if SCons.Util.is_String(s)]
    except Exception as e:
        raise SCons.Errors.StopError("%s" % (e))
    # de-duplicate: archives can list the same path twice, which would cause
    # a cyclic-dependency error downstream
    target = [i.strip() for i in list(set(target))]
    if not target:
        SCons.Warnings.warn(UnpackWarning, "emitter file list on target [%s] is empty, please check your extractor list function [%s]" % (source[0], cmd))
    # prefix relative entries with the extraction directory
    if env["UNPACK"]["EXTRACTDIR"] != ".":
        target = [i if os.path.isabs(i) else os.path.join(env["UNPACK"]["EXTRACTDIR"], i) for i in target]
    return target, source
def __unpack_all(env, target, source):
    """Environment method (env.UnpackAll): extract ``source`` unless ``target`` exists.

    @param env    : construction environment
    @param target : path used both as the up-to-date marker and action target
    @param source : archive to unpack
    """
    if os.path.exists(target):
        # nothing to do -- treat an existing target as up to date
        return
    # print() works on both Python 2 and 3 (single argument),
    # unlike the former Py2-only print statement.
    print("Unpacking %s ..." % source)
    __action(target, source, env)
# generate function, that adds the builder to the environment
# @env environment object
def generate(env):
    """Attach the Unpack tool to the environment.

    Builds the default env["UNPACK"] toolset (per-format extractor table),
    detects the available archive tools for the current platform (7-Zip on
    Windows; tar/unzip/bzip2/gzip on *nix), and registers the
    ``env.UnpackAll`` method.

    @param env : construction environment to configure
    @raises SCons.Errors.StopError : unknown platform
    """
    # default toolset; platform detection below fills in RUN/flags
    toolset = {
        "STOPONEMPTYFILE": True,
        "VIWEXTRACTOUTPUT": False,
        "EXTRACTDIR": os.curdir,
        "EXTRACTOR": {
            "TARGZ": {
                "PRIORITY": 0,
                "SUFFIX": [".tar.gz", ".tgz", ".tar.gzip"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            },
            "TARBZ": {
                "PRIORITY": 0,
                "SUFFIX": [".tar.bz", ".tbz", ".tar.bz2", ".tar.bzip2", ".tar.bzip"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            },
            "BZIP": {
                "PRIORITY": 1,
                # NOTE(review): "bzip" has no leading dot -- presumably ".bzip"
                # was intended; left as-is because the suffix compare still
                # matches ".bzip" names. Confirm before changing.
                "SUFFIX": [".bz", "bzip", ".bz2", ".bzip2"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            },
            "GZIP": {
                "PRIORITY": 1,
                "SUFFIX": [".gz", ".gzip"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            },
            "TAR": {
                "PRIORITY": 1,
                "SUFFIX": [".tar"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            },
            "ZIP": {
                "PRIORITY": 1,
                "SUFFIX": [".zip"],
                "EXTRACTSUFFIX": "",
                "EXTRACTFLAGS": "",
                "EXTRACTCMD": "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTSUFFIX']}",
                "RUN": "",
                "LISTCMD": "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['LISTSUFFIX']}",
                "LISTSUFFIX": "",
                "LISTFLAGS": "",
                "LISTEXTRACTOR": None
            }
        }
    }
    # Windows: only 7-Zip is supported (`!=` replaces the Py2-only `<>`)
    if env["PLATFORM"] != "darwin" and "win" in env["PLATFORM"]:
        if env.WhereIs('7z', env.get('PATH')):
            toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "x"
            toolset["EXTRACTOR"]["TARGZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} l -sii -ttar -y -so"
            toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} x -sii -ttar -y -o${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "x"
            # fixed copy-paste defect: these two suffixes referenced TARGZ's RUN
            toolset["EXTRACTOR"]["TARBZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARBZ']['RUN']} l -sii -ttar -y -so"
            toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARBZ']['RUN']} x -sii -ttar -y -o${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["BZIP"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["BZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["BZIP"]["LISTFLAGS"] = "l"
            toolset["EXTRACTOR"]["BZIP"]["LISTSUFFIX"] = "-y -so"
            toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["BZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["GZIP"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "l"
            toolset["EXTRACTOR"]["GZIP"]["LISTSUFFIX"] = "-y -so"
            toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["GZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["ZIP"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "l"
            toolset["EXTRACTOR"]["ZIP"]["LISTSUFFIX"] = "-y -so"
            toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["TAR"]["RUN"] = "7z"
            toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
            toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "l"
            toolset["EXTRACTOR"]["TAR"]["LISTSUFFIX"] = "-y -ttar -so"
            toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "x"
            toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-y -ttar -o${UNPACK['EXTRACTDIR']}"
        else:
            # print() replaces the Py2-only print statement
            print('''*********************** Error ************************
* *
* Please make sure that 7-zip is in your System PATH *
* *
******************************************************
''')
        # here can add some other Windows tools, that can handle the archive files
        # but I don't know which ones can handle all file types
    # *nix: detect the standard tools and set their flags
    elif env["PLATFORM"] in ["darwin", "linux", "posix", "msys"]:
        if env.WhereIs("unzip"):
            toolset["EXTRACTOR"]["ZIP"]["RUN"] = "unzip"
            toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_unzip
            toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "-l"
            toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "-oqq"
            toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-d ${UNPACK['EXTRACTDIR']}"
        if env.WhereIs("tar"):
            toolset["EXTRACTOR"]["TAR"]["RUN"] = "tar"
            toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
            toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "tvf"
            toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "xf"
            toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "tar"
            toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
            toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "xfz"
            toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "tvfz"
            toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
            toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "tar"
            toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
            toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "xfj"
            toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "tvfj"
            toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
        if env.WhereIs("bzip2"):
            toolset["EXTRACTOR"]["BZIP"]["RUN"] = "bzip2"
            toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "-df"
        if env.WhereIs("gzip"):
            toolset["EXTRACTOR"]["GZIP"]["RUN"] = "gzip"
            toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_gzip
            toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "-l"
            toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "-df"
    else:
        raise SCons.Errors.StopError("Unpack tool detection on this platform [%s] unkown" % (env["PLATFORM"]))
    # the target_factory must be a "Entry", because the target list can be files and dirs, so we can not specified the targetfactory explicite
    env.Replace(UNPACK=toolset)
    env.AddMethod(__unpack_all, 'UnpackAll')
    # env["BUILDERS"]["UnpackAll"] = SCons.Builder.Builder( action = __action, emitter = __emitter, target_factory = SCons.Node.FS.Entry, source_factory = SCons.Node.FS.File, single_source = True, PRINT_CMD_LINE_FUNC = __message )
# existing function of the builder
# @param env environment object
# @return true
def exists(env):
    """Tool existence hook for SCons -- this builder is always available."""
    return 1
# SConscript glue: pull the construction environment exported by the caller
# and install the UnpackAll method on it.
Import('env')
generate(env)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import math
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.models import Document
from jobsub.parameterization import find_variables
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
LOG = logging.getLogger(__name__)
def index(request):
    """App landing page: delegate straight to the query editor view."""
    return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
    """
    save_design(request, form, type_, design, explicit_save) -> SavedQuery

    Persist the design described by ``form``:
      * With ``explicit_save``, the data is stored into the current design.
      * Otherwise (plain submit) the current design is NOT overwritten; a new
        "auto" design is created (iff the data changed), named after the
        current one with AUTO_DESIGN_SUFFIX appended.

    May return a different SavedQuery than the one passed in.
    Assumes form.saveform is a valid SaveForm.
    """
    authorized_get_design(request, design.id)
    assert form.saveform.is_valid()

    # Pick the design class (and the sub-form it serializes) by query type.
    if type_ == models.SPARK:
        from spark.design import SparkDesign
        design_cls = SparkDesign
        sub_form = form.query
    elif type_ in (models.HQL, models.IMPALA):
        # Beeswax and Impala share the HQL design representation.
        design_cls = beeswax.design.HQLdesign
        sub_form = form
    else:
        raise ValueError(_('Invalid design type %(type)s') % {'type': type_})

    design_obj = design_cls(sub_form, query_type=type_)
    return _save_design(request.user, design, type_, design_obj, explicit_save,
                        form.saveform.cleaned_data['name'],
                        form.saveform.cleaned_data['desc'])
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
    """Persist ``design_obj`` into ``design`` (a SavedQuery) for ``user``.

    * ``explicit_save`` (and write access): overwrite the current design.
    * Otherwise, if the data changed, clone into a new "auto" design so the
      original is untouched; unchanged data keeps the existing design.
    Returns the SavedQuery actually written (may differ from the input).
    """
    old_design = design
    new_data = design_obj.dumps()
    if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
        design.name = name
        design.desc = desc
        design.is_auto = False
    elif design_obj != old_design.get_design():
        # Auto save iff the data is different
        if old_design.id is not None:
            # Clone iff the parent design isn't a new unsaved model
            design = old_design.clone(new_owner=user)
            if not old_design.is_auto:
                design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
        else:
            design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
        design.is_auto = True
    design.name = design.name[:64]
    design.type = type_
    design.data = new_data
    design.save()
    # Bug fix: `explicit_save and '' or 'auto '` always evaluated to 'auto '
    # (the falsy-middle and/or pitfall), so explicit saves were logged as auto.
    LOG.info('Saved %s design "%s" (id %s) for %s' % ('' if explicit_save else 'auto ', design.name, design.id, design.owner))
    if design.doc.exists():
        design.doc.update(name=design.name, description=design.desc)
    else:
        Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
    if design.is_auto:
        design.doc.get().add_to_history()
    return design
def delete_design(request):
    """Delete (or trash) the selected designs; GET shows a confirmation page."""
    if request.method != 'POST':
        return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})

    ids = request.POST.getlist('designs_selection')
    designs = dict((design_id, authorized_get_design(request, design_id, owner_only=True))
                   for design_id in ids)
    if None in designs.values():
        missing = ','.join(key for key, value in designs.items() if value is None)
        LOG.error('Cannot delete non-existent design(s) %s' % missing)
        return list_designs(request)

    # skipTrash=true deletes permanently; otherwise the design is trashed.
    skip_trash = request.POST.get('skipTrash', 'false') != 'false'
    for design in designs.values():
        if skip_trash:
            design.doc.all().delete()
            design.delete()
        else:
            design.doc.get().send_to_trash()
    return redirect(reverse(get_app_name(request) + ':list_designs'))
def restore_design(request):
    """Restore the selected designs from the trash; GET shows a confirmation page."""
    if request.method != 'POST':
        return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})

    ids = request.POST.getlist('designs_selection')
    designs = dict((design_id, authorized_get_design(request, design_id))
                   for design_id in ids)
    if None in designs.values():
        missing = ','.join(key for key, value in designs.items() if value is None)
        LOG.error('Cannot restore non-existent design(s) %s' % missing)
        return list_designs(request)

    for design in designs.values():
        design.doc.get().restore_from_trash()
    return redirect(reverse(get_app_name(request) + ':list_designs'))
def clone_design(request, design_id):
    """Clone a design belonging to any user and redirect to editing the copy.

    Falls back to the design list when ``design_id`` does not exist.
    """
    design = authorized_get_design(request, design_id)
    if design is None:
        LOG.error('Cannot clone non-existent design %s' % (design_id,))
        return list_designs(request)
    copy = design.clone(request.user)
    copy.save()
    # Link the clone to a Document so it appears in listings
    # (the previously captured `copy_doc` return value was never used).
    Document.objects.link(copy,
                          owner=copy.owner,
                          name=copy.name,
                          description=copy.desc,
                          extra=copy.type)
    messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
    return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
    """
    Show all saved queries.

    Reached via /beeswax/list_designs?filterargs, where the supported
    filter arguments are:
      page=<n>    - pagination, defaults to 1
      user=<name> - restrict to one user's designs (default: all users)
      type=<type> - saved query type, e.g. "hql" (default: all)
      sort=<key>  - one of "date", "name", "desc", "type"; a "-" prefix
                    ("-date", the default) sorts descending
      text=<frag> - substring search over names and descriptions
    """
    DEFAULT_PAGE_SIZE = 20
    app_name = get_app_name(request)

    prefix = 'q-'
    design_params = _copy_prefix(prefix, request.GET)
    # Force the type filter to this app's designs only.
    design_params[prefix + 'type'] = app_name
    page, filter_params = _list_designs(request.user, design_params, DEFAULT_PAGE_SIZE, prefix)

    context = {
        'page': page,
        'filter_params': filter_params,
        'user': request.user,
        'designs_json': json.dumps([q.id for q in page.object_list]),
    }
    return render('list_designs.mako', request, context)
def list_trashed_designs(request):
    """Show the current trash: saved queries that have been sent to the trash."""
    DEFAULT_PAGE_SIZE = 20
    app_name = get_app_name(request)
    user = request.user

    prefix = 'q-'
    design_params = _copy_prefix(prefix, request.GET)
    # Force the type filter to this app's designs only.
    design_params[prefix + 'type'] = app_name
    page, filter_params = _list_designs(user, design_params, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)

    context = {
        'page': page,
        'filter_params': filter_params,
        'user': request.user,
        'designs_json': json.dumps([q.id for q in page.object_list]),
    }
    return render('list_trashed_designs.mako', request, context)
def my_queries(request):
    """
    Show a combined view of the user's query history and saved queries.

    Understands the GET parameters of ``list_query_history`` under an ``h-``
    prefix and those of ``list_designs`` under a ``q-`` prefix.  The ``user``
    filter is not honoured: this view always shows the current user's items.
    """
    DEFAULT_PAGE_SIZE = 30
    app_name = get_app_name(request)

    # History half (h- prefix), pinned to the current user and this app.
    h_prefix = 'h-'
    history_params = _copy_prefix(h_prefix, request.GET)
    history_params[h_prefix + 'user'] = request.user
    history_params[h_prefix + 'type'] = app_name
    hist_page, hist_filter = _list_query_history(request.user,
                                                 history_params,
                                                 DEFAULT_PAGE_SIZE,
                                                 h_prefix)

    # Saved-query half (q- prefix), pinned the same way.
    q_prefix = 'q-'
    design_params = _copy_prefix(q_prefix, request.GET)
    design_params[q_prefix + 'user'] = request.user
    design_params[q_prefix + 'type'] = app_name
    query_page, query_filter = _list_designs(request.user, design_params, DEFAULT_PAGE_SIZE, q_prefix)

    filter_params = hist_filter
    filter_params.update(query_filter)
    return render('my_queries.mako', request, {
        'request': request,
        'h_page': hist_page,
        'q_page': query_page,
        'filter_params': filter_params,
        'designs_json': json.dumps([q.id for q in query_page.object_list]),
    })
def list_query_history(request):
    """
    View the history of query (for the current user).

    We get here from /beeswax/query_history?filterargs, with the options being:
      page=<n>          - Controls pagination. Defaults to 1.
      user=<name>       - Show history items from a user. Default to current user only.
                          Also accepts ':all' to show all history items.
      type=<type>       - <type> is "beeswax|impala", for design type. Default to show all.
      design_id=<id>    - Show history for this particular design id.
      sort=<key>        - Sort by the attribute <key>, which is one of:
                          "date", "state", "name" (design name), and "type" (design type)
                          Accepts the form "-date", which sort in descending order.
                          Default to "-date".
      auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Default True

    With format=json the page is returned as a JSON query list instead of HTML.
    """
    DEFAULT_PAGE_SIZE = 100
    prefix = 'q-'
    # Superusers may see everyone's queries; others are pinned to their own.
    share_queries = request.user.is_superuser
    querydict_query = request.GET.copy()
    if not share_queries:
        querydict_query[prefix + 'user'] = request.user.username
    app_name = get_app_name(request)
    querydict_query[prefix + 'type'] = app_name
    page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
    # Single GET lookup, and no shadowing of the builtin `filter`
    # (previously: `filter = GET.get(k) and GET.get(k) or ''`).
    search_filter = request.GET.get(prefix + 'search') or ''
    if request.GET.get('format') == 'json':
        resp = {
            'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
        }
        return JsonResponse(resp)
    return render('list_history.mako', request, {
        'request': request,
        'page': page,
        'filter_params': filter_params,
        'share_queries': share_queries,
        'prefix': prefix,
        'filter': search_filter,
    })
def massage_query_history_for_json(app_name, query_history):
    """Shape one QueryHistory row into the dict used by the JSON history listing."""
    submitted = query_history.submission_date
    design_url = reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id})
    results_url = not query_history.is_failure() and reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id}) or ""
    return {
        'query': escape(query_history.query),
        'timeInMs': time.mktime(submitted.timetuple()),
        'timeFormatted': submitted.strftime("%x %X"),
        'designUrl': design_url,
        'resultsUrl': results_url,
    }
def download(request, id, format):
    """Stream the results of query history ``id`` in the given ``format``.

    Any failure is surfaced to the user as a PopupException.
    """
    try:
        query_history = authorized_get_query_history(request, id, must_exist=True)
        db = dbms.get(request.user, query_history.get_query_server_config())
        LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
        return data_export.download(query_history.get_handle(), format, db)
    except Exception as e:
        # Prefer the exception's message attribute when present and non-empty.
        message = getattr(e, 'message', None) or e
        raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
    """
    View function for executing an arbitrary query.

    With ``query_history_id`` the editor is opened on an existing execution
    (watching/fetching its results); with ``design_id`` (or neither) it is
    opened on a design for a fresh run.  The chosen mode is passed to the
    template via the ``action`` context key.
    """
    action = 'query'
    if query_history_id:
        query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
        design = query_history.design
        try:
            if query_history.server_id and query_history.server_guid:
                # Probing the handle raises QueryServerException if it expired.
                handle, state = _get_query_handle_and_state(query_history)
            if 'on_success_url' in request.GET:
                if request.GET.get('on_success_url'):
                    action = 'watch-redirect'
                else:
                    action = 'watch-results'
            else:
                action = 'editor-results'
        except QueryServerException, e:
            # Stale/expired server handle: mark the history row expired and
            # let the editor show the expired-results state.
            if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
                query_history.save_state(QueryHistory.STATE.expired)
                LOG.warn("Invalid query handle", exc_info=sys.exc_info())
                action = 'editor-expired-results'
            else:
                raise e
    else:
        # Check perms.
        authorized_get_design(request, design_id)
        app_name = get_app_name(request)
        query_type = SavedQuery.TYPES_MAPPING[app_name]
        design = safe_get_design(request, query_type, design_id)
        query_history = None
    # Unsaved designs have no Document yet, hence the id guard.
    doc = design and design.id and design.doc.get()
    context = {
        'design': design,
        'query': query_history, # Backward
        'query_history': query_history,
        'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
        'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
        'can_edit_name': design and design.id and not design.is_auto,
        'doc_id': doc and doc.id or -1,
        'can_edit': doc and doc.can_write(request.user),
        'action': action,
        'on_success_url': request.GET.get('on_success_url'),
        'has_metastore': 'metastore' in get_apps_dict(request.user)
    }
    return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
    """
    Returns the view for the results of the QueryHistory with the given id.

    The query results MUST be ready.
    To display query results, one should always go through the execute_query view.
    If the result set has has_result_set=False, display an empty result.

    If ``first_row`` is 0, restarts (if necessary) the query read.  Otherwise, just
    spits out a warning if first_row doesn't match the servers conception.
    Multiple readers will produce a confusing interaction here, and that's known.

    It understands the ``context`` GET parameter. (See execute_query().)
    Returns a JsonResponse with the (HTML-escaped) rows, pagination cursors and
    per-format download URLs.
    """
    first_row = long(first_row)
    start_over = (first_row == 0)
    # Placeholder "empty result" object used when there is no result set.
    results = type('Result', (object,), {
        'rows': 0,
        'columns': [],
        'has_more': False,
        'start_row': 0,
    })
    data = []
    fetch_error = False
    error_message = ''
    log = ''
    columns = []
    app_name = get_app_name(request)
    query_history = authorized_get_query_history(request, id, must_exist=True)
    query_server = query_history.get_query_server_config()
    db = dbms.get(request.user, query_server)
    handle, state = _get_query_handle_and_state(query_history)
    context_param = request.GET.get('context', '')
    query_context = parse_query_context(context_param)
    # Update the status as expired should not be accessible
    expired = state == models.QueryHistory.STATE.expired
    # Retrieve query results or use empty result if no result set
    try:
        if query_server['server_name'] == 'impala' and not handle.has_result_set:
            downloadable = False
        else:
            results = db.fetch(handle, start_over, 100)
            data = []
            # Materialize and HTML escape results
            # TODO: use Number + list comprehension
            for row in results.rows():
                escaped_row = []
                for field in row:
                    if isinstance(field, (int, long, float, complex, bool)):
                        # NaN/Inf are not valid JSON numbers; serialize them as strings.
                        if math.isnan(field) or math.isinf(field):
                            escaped_field = json.dumps(field)
                        else:
                            escaped_field = field
                    elif field is None:
                        escaped_field = 'NULL'
                    else:
                        field = smart_unicode(field, errors='replace') # Prevent error when getting back non utf8 like charset=iso-8859-1
                        # NOTE(review): replacing ' ' with ' ' is a no-op -- this looks
                        # like a mangled `.replace(' ', '&nbsp;')`; confirm against upstream.
                        escaped_field = escape(field).replace(' ', ' ')
                    escaped_row.append(escaped_field)
                data.append(escaped_row)
            # We display the "Download" button only when we know that there are results:
            downloadable = first_row > 0 or data
            log = db.get_log(handle)
            columns = results.data_table.cols()
    except Exception, ex:
        LOG.exception('error fetching results')
        fetch_error = True
        error_message, log = expand_exception(ex, db, handle)
    # Handle errors
    error = fetch_error or results is None or expired
    context = {
        'error': error,
        'message': error_message,
        'query': query_history,
        'results': data,
        'columns': columns,
        'expected_first_row': first_row,
        'log': log,
        'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
        'query_context': query_context,
        'can_save': False,
        'context_param': context_param,
        'expired': expired,
        'app_name': app_name,
        'next_json_set': None,
        'is_finished': query_history.is_finished()
    }
    if not error:
        download_urls = {}
        if downloadable:
            for format in common.DL_FORMATS:
                download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))
        results.start_row = first_row
        context.update({
            'id': id,
            'results': data,
            'has_more': results.has_more,
            'next_row': results.start_row + len(data),
            'start_row': results.start_row,
            'expected_first_row': first_row,
            'columns': columns,
            'download_urls': download_urls,
            'can_save': query_history.owner == request.user,
            'next_json_set':
                reverse(get_app_name(request) + ':view_results', kwargs={
                    'id': str(id),
                    'first_row': results.start_row + len(data)
                }
                )
                # NOTE(review): ('?context=' + context_param) is always truthy, so the
                # `or ''` below is dead code -- presumably the intent was to omit the
                # parameter when context_param is empty; confirm intended URL shape.
                + ('?context=' + context_param or '') + '&format=json'
        })
    context['columns'] = massage_columns_for_json(columns)
    if 'save_form' in context:
        del context['save_form']
    if 'query' in context:
        del context['query']
    return JsonResponse(context)
def configuration(request):
    """Render the query server's default configuration values.

    Any configuration entry whose key mentions 'password' is masked
    before it reaches the page.
    """
    server_config = get_query_server_config(get_app_name(request))
    include_hadoop = bool(request.REQUEST.get("include_hadoop", False))
    config_values = dbms.get(request.user, server_config).get_default_configuration(include_hadoop)

    # Mask anything that looks like a credential before display.
    for entry in config_values:
        if 'password' in entry.key.lower():
            entry.value = '*' * 10

    return render("configuration.mako", request, {'config_values': config_values})
"""
Other views
"""
def install_examples(request):
    """Install the beeswax sample queries/tables for the current app.

    POST only. Returns a JSON payload with 'status' (0 on success, -1
    otherwise) and a 'message' describing the failure, if any.
    """
    response = {'status': -1, 'message': ''}

    if request.method == 'POST':
        try:
            app_name = get_app_name(request)
            beeswax.management.commands.beeswax_install_examples.Command().handle(app_name=app_name, user=request.user)
            response['status'] = 0
        except Exception as err:  # 'as' form works on Python 2.6+ and Python 3; the old comma syntax does not
            LOG.exception(err)
            response['message'] = str(err)
    else:
        response['message'] = _('A POST request is required.')

    return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
    """
    A callback for query completion notification. When the query is done,
    BeeswaxServer notifies us by sending a GET request to this view.

    Always responds with a tiny HTML page whose body reports 'sent',
    'email_notify is false', or the error encountered.
    """
    # NOTE(review): %(message)s is interpolated between </head> and <body>,
    # i.e. outside the body element — looks like a long-standing template bug.
    message_template = '<html><head></head>%(message)s<body></body></html>'
    message = {'message': 'error'}

    try:
        # The stored server_id carries a trailing newline — presumably an
        # artifact of how it was recorded; verify against the writer.
        query_history = QueryHistory.objects.get(server_id=server_id + '\n')

        # Update the query status
        query_history.set_to_available()

        # Find out details about the query; skip mailing when the user did
        # not ask for notification.
        if not query_history.notify:
            message['message'] = 'email_notify is false'
            return HttpResponse(message_template % message)

        design = query_history.design
        user = query_history.owner
        subject = _("Beeswax query completed.")
        if design:
            subject += ": %s" % (design.name,)

        # Absolute link back to the watch page for this query.
        link = "%s%s" % \
            (get_desktop_uri_prefix(),
             reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
        body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
            'subject': subject, 'link': link, 'query': query_history.query
        }

        user.email_user(subject, body)
        message['message'] = 'sent'
    except Exception, ex:
        # Best-effort notification: report the failure in the response body
        # rather than propagating.
        msg = "Failed to send query completion notification via e-mail: %s" % (ex)
        LOG.error(msg)
        message['message'] = msg
    return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
    """Project column objects into plain dicts (name/type/comment) for JSON output."""
    return [
        {'name': col.name, 'type': col.type, 'comment': col.comment}
        for col in cols
    ]
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
    """Fetch a SavedQuery by id, enforcing the caller's document permission.

    Returns None for a missing design unless must_exist is set, in which
    case a PopupException is raised. owner_only switches the permission
    check from read to write.
    """
    if design_id is None and not must_exist:
        return None

    try:
        design = SavedQuery.objects.get(id=design_id)
    except SavedQuery.DoesNotExist:
        if not must_exist:
            return None
        raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})

    doc = design.doc.get()
    check = doc.can_write_or_exception if owner_only else doc.can_read_or_exception
    check(request.user)

    return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
    """Fetch a QueryHistory by id, enforcing the caller's read permission.

    Returns None for a missing entry unless must_exist is set, in which
    case a PopupException is raised.
    """
    if query_history_id is None and not must_exist:
        return None

    try:
        query_history = QueryHistory.get(id=query_history_id)
    except QueryHistory.DoesNotExist:
        if not must_exist:
            return None
        raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})

    # Some queries don't have a design so are not linked to Document Model permission
    has_linked_doc = query_history.design is not None and query_history.design.doc.exists()
    if has_linked_doc:
        query_history.design.doc.get().can_read_or_exception(request.user)
    elif not (request.user.is_superuser or request.user == query_history.owner):
        raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})

    return query_history
def safe_get_design(request, design_type, design_id=None):
    """
    Return a new design, if design_id is None,
    Return the design with the given id and type. If the design is not found,
    display a notification and return a new design.
    """
    design = authorized_get_design(request, design_id) if design_id is not None else None

    # Fall back to a fresh, unsaved design owned by the requesting user.
    if design is None:
        design = SavedQuery(owner=request.user, type=design_type)

    return design
def make_parameterization_form(query_str):
    """
    Creates a django form on the fly with arguments from the
    query.

    Returns the Form *class* (not an instance) with one required CharField
    per variable found in query_str, or None when the query declares no
    variables.
    """
    variables = find_variables(query_str)
    if len(variables) > 0:
        class Form(forms.Form):
            # Writing through locals() inside a class body turns each
            # variable name into a class attribute, i.e. a form field.
            # NOTE(review): this relies on the class-body namespace being
            # writable via locals() (true in CPython) — fragile but
            # long-standing; the loop variable 'name' also ends up as a
            # class attribute as a side effect.
            for name in sorted(variables):
                locals()[name] = forms.CharField(required=True)
        return Form
    else:
        return None
def execute_directly(request, query, query_server=None,
                     design=None, on_success_url=None, on_success_params=None,
                     **kwargs):
    """
    execute_directly(request, query_msg, tablename, design) -> HTTP response for execution

    This method wraps around dbms.execute_query() to take care of the HTTP response
    after the execution.

    query
      The HQL model Query object.

    query_server
      To which Query Server to submit the query.
      Dictionary with keys: ['server_name', 'server_host', 'server_port'].

    design
      The design associated with the query.

    on_success_url
      Where to go after the query is done. The URL handler may expect an option "context" GET
      param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
      which case the on complete URL is the return of:
        on_success_url(history_obj) -> URL string
      Defaults to the view results page.

    on_success_params
      Optional params to pass to the on_success_url (in additional to "context").

    Note that this may throw a Beeswax exception.
    """
    if design is not None:
        # Raises if the caller is not allowed to read the design.
        authorized_get_design(request, design.id)

    db = dbms.get(request.user, query_server)
    # Switch to the query's database before submitting (defaults to 'default').
    database = query.query.get('database', 'default')
    db.use(database)

    query_history = db.execute_query(query, design)

    watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})

    # Prepare the GET params for the watch_url
    get_dict = QueryDict(None, mutable=True)

    # (1) on_success_url — may be a callable taking the history object.
    if on_success_url:
        if callable(on_success_url):
            on_success_url = on_success_url(query_history)
        get_dict['on_success_url'] = on_success_url

    # (2) misc
    if on_success_params:
        get_dict.update(on_success_params)

    return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
    """
    _list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)

    A helper to gather the designs page. It understands all the GET params in
    ``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
    """
    DEFAULT_SORT = ('-', 'date')  # Descending date

    SORT_ATTR_TRANSLATION = dict(
        date='last_modified',
        name='name',
        desc='description',
        type='extra',
    )

    # Trash and security
    if is_trashed:
        db_queryset = Document.objects.trashed_docs(SavedQuery, user)
    else:
        db_queryset = Document.objects.available_docs(SavedQuery, user)

    # Filter by user
    filter_username = querydict.get(prefix + 'user')
    if filter_username:
        try:
            db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
        except User.DoesNotExist:
            # Don't care if a bad filter term is provided
            pass

    # Design type
    d_type = querydict.get(prefix + 'type')
    if d_type and d_type in SavedQuery.TYPES_MAPPING.keys():
        db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))

    # Text search
    frag = querydict.get(prefix + 'text')
    if frag:
        db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))

    # Ordering
    sort_key = querydict.get(prefix + 'sort')
    if sort_key:
        if sort_key[0] == '-':
            sort_dir, sort_attr = '-', sort_key[1:]
        else:
            sort_dir, sort_attr = '', sort_key

        # 'in' replaces the Python2-only dict.has_key(), behavior unchanged.
        if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
            sort_dir, sort_attr = DEFAULT_SORT
    else:
        sort_dir, sort_attr = DEFAULT_SORT
    db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])

    designs = [job.content_object for job in db_queryset.all() if job.content_object and job.content_object.is_auto == False]

    pagenum = int(querydict.get(prefix + 'page', 1))
    paginator = Paginator(designs, page_size)
    page = paginator.page(pagenum)

    # We need to pass the parameters back to the template to generate links
    keys_to_copy = [prefix + key for key in ('user', 'type', 'sort')]
    filter_params = copy_query_dict(querydict, keys_to_copy)

    return page, filter_params
def _get_query_handle_and_state(query_history):
    """
    Front-end wrapper to handle exceptions. Expects the query to be submitted.

    Returns (handle, state); raises PopupException when either cannot be
    obtained from the query server.
    """
    handle = query_history.get_handle()
    if handle is None:
        raise PopupException(_("Failed to retrieve query state from the Query Server."))

    server_config = query_history.get_query_server_config()
    state = dbms.get(query_history.owner, server_config).get_state(handle)
    if state is None:
        raise PopupException(_("Failed to contact Server to check query status."))

    return (handle, state)
def parse_query_context(context):
    """
    parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
    """
    if not context:
        return None

    parts = context.split(':', 1)
    if len(parts) == 2 and parts[0] in ('table', 'design'):
        if parts[0] == 'design':  # Translate design id to design obj
            parts[1] = models.SavedQuery.get(int(parts[1]))
        return parts

    LOG.error("Invalid query context data: %s" % (context,))
    return None
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
def _parse_out_hadoop_jobs(log):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
"""
ret = []
for match in HADOOP_JOBS_RE.finditer(log):
job_id = match.group(1)
if job_id not in ret:
ret.append(job_id)
return ret
def _copy_prefix(prefix, base_dict):
    """Copy keys starting with ``prefix`` into a new mutable QueryDict."""
    querydict = QueryDict(None, mutable=True)
    # .items() exists on both Python 2 and 3; .iteritems() was Python2-only
    # and returns the same (key, value) pairs here.
    for key, val in base_dict.items():
        if key.startswith(prefix):
            querydict[key] = val
    return querydict
def _list_query_history(user, querydict, page_size, prefix=""):
    """
    _list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)

    A helper to gather the history page. It understands all the GET params in
    ``list_query_history``, by reading keys from the ``querydict`` with the
    given ``prefix``.
    """
    DEFAULT_SORT = ('-', 'date')  # Descending date

    SORT_ATTR_TRANSLATION = dict(
        date='submission_date',
        state='last_state',
        name='design__name',
        type='design__type',
    )

    db_queryset = models.QueryHistory.objects.select_related()

    # Filtering
    #
    # Queries without designs are the ones we submitted on behalf of the user,
    # (e.g. view table data). Exclude those when returning query history.
    if querydict.get(prefix + 'auto_query', 'on') != 'on':
        db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)

    user_filter = querydict.get(prefix + 'user', user.username)
    if user_filter != ':all':
        db_queryset = db_queryset.filter(owner__username=user_filter)

    # Design id
    design_id = querydict.get(prefix + 'design_id')
    if design_id:
        db_queryset = db_queryset.filter(design__id=int(design_id))

    # Search
    search_filter = querydict.get(prefix + 'search')
    if search_filter:
        db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))

    # Design type
    d_type = querydict.get(prefix + 'type')
    if d_type:
        if d_type not in SavedQuery.TYPES_MAPPING.keys():
            LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
        else:
            db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])

    # Ordering
    sort_key = querydict.get(prefix + 'sort')
    if sort_key:
        sort_dir, sort_attr = '', sort_key
        if sort_key[0] == '-':
            sort_dir, sort_attr = '-', sort_key[1:]

        # 'in' replaces the Python2-only dict.has_key(), behavior unchanged.
        if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
            sort_dir, sort_attr = DEFAULT_SORT
    else:
        sort_dir, sort_attr = DEFAULT_SORT
    db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')

    # Get the total return count before slicing
    total_count = db_queryset.count()

    # Slicing (must be the last filter applied)
    pagenum = int(querydict.get(prefix + 'page', 1))
    if pagenum < 1:
        pagenum = 1
    db_queryset = db_queryset[page_size * (pagenum - 1):page_size * pagenum]
    paginator = Paginator(db_queryset, page_size, total=total_count)
    page = paginator.page(pagenum)

    # We do slicing ourselves, rather than letting the Paginator handle it, in order to
    # update the last_state on the running queries
    for history in page.object_list:
        _update_query_state(history.get_full_object())

    # We need to pass the parameters back to the template to generate links
    keys_to_copy = [prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search')]
    filter_params = copy_query_dict(querydict, keys_to_copy)

    return page, filter_params
def _update_query_state(query_history):
    """
    Update the last_state for a QueryHistory object. Returns success as True/False.

    This only occurs iff the current last_state is submitted or running, since the other
    states are stable, more-or-less.
    Note that there is a transition from available/failed to expired. That occurs lazily
    when the user attempts to view results that have expired.
    """
    if query_history.last_state <= models.QueryHistory.STATE.running.index:
        try:
            state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
            if state_enum is None:
                # Error was logged at the source
                return False
        except Exception, e:
            LOG.error(e)
            # An unreachable server marks the query as failed rather than crashing.
            state_enum = models.QueryHistory.STATE.failed
        query_history.save_state(state_enum)
    return True
def get_db_choices(request):
    """Return (name, name) choice tuples for every database visible to the user."""
    app_name = get_app_name(request)
    query_server = get_query_server_config(app_name)
    db = dbms.get(request.user, query_server)
    dbs = db.get_databases()
    # Don't reuse 'db' as the comprehension variable: on Python 2 the
    # list-comp variable leaks into the enclosing scope and clobbered the
    # dbms client object.
    return [(database, database) for database in dbs]
# Raw string: "\s" in a plain literal is an invalid escape sequence
# (DeprecationWarning on Python 3.6+); r"\s+" is the correct form.
WHITESPACE = re.compile(r"\s+", re.MULTILINE)


def collapse_whitespace(s):
    """Collapse each run of whitespace in *s* to one space and strip the ends."""
    return WHITESPACE.sub(" ", s).strip()
|
|
import json
from django.conf import settings
from django.test import TestCase
from hamcrest import (
assert_that, equal_to, has_entry,
has_item, has_key
)
from httmock import HTTMock
from .factories import NodeFactory, NodeTypeFactory
from stagecraft.libs.authorization.tests.test_http import govuk_signon_mock
class NodeViewsTestCase(TestCase):
    """HTTP-level tests for the /organisation/node list/create endpoints.

    setUp builds three node types (Thing, Department, Agency) and three
    nodes: an Agency node named 'no name' with no abbreviation, a
    Department 'Cheese', and a Thing 'Brie' (abbreviation 'BR') whose
    parent is 'Cheese'. Requests authenticate with the development OAuth
    bearer token unless a test overrides USE_DEVELOPMENT_USERS.
    """

    def setUp(self):
        thing = NodeTypeFactory(
            id='ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
            name='Thing',
        )
        department = NodeTypeFactory(
            id='f9510fef-a879-4cf8-bcfb-9e0871579f5a',
            name='Department',
        )
        agency = NodeTypeFactory(
            id='6f5aadb2-3bec-447f-87fd-c68f8c5ac2e8',
            name='Agency',
        )
        NodeFactory(
            name='no name',
            id='9df09a3f-2ed6-46f3-8c5c-c42b860b98a6',
            abbreviation=None,
            typeOf=agency,
        )
        cheese = NodeFactory(
            id='f59bddcc-4494-46f8-a2c9-884030fa3087',
            name='Cheese',
            abbreviation=None,
            typeOf=department,
        )
        brie = NodeFactory(
            id='edc9aa07-f45f-4d93-9f9c-d9d760f08019',
            name='Brie',
            abbreviation='BR',
            typeOf=thing,
        )
        # Brie sits under Cheese so the ancestors endpoints have a hierarchy.
        brie.parents.add(cheese)

    def test_list_nodes(self):
        """GET without filters returns all three nodes."""
        resp = self.client.get(
            '/organisation/node',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(3))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'))
        )
        assert_that(
            resp_json,
            has_item(has_entry('id', 'f59bddcc-4494-46f8-a2c9-884030fa3087'))
        )
        assert_that(
            resp_json,
            has_item(has_entry('id', '9df09a3f-2ed6-46f3-8c5c-c42b860b98a6'))
        )

    def test_list_nodes_filter_by_type(self):
        """Repeated ?type= params act as an OR over node types."""
        resp = self.client.get(
            '/organisation/node?type=Department&type=Agency',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(2))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'f59bddcc-4494-46f8-a2c9-884030fa3087'))
        )
        assert_that(
            resp_json,
            has_item(has_entry('id', '9df09a3f-2ed6-46f3-8c5c-c42b860b98a6'))
        )

    def test_list_nodes_filter_by_name(self):
        """?name= restricts the listing to the matching node."""
        resp = self.client.get(
            '/organisation/node?name=Brie',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'))
        )

    def test_list_nodes_filter_by_abbr(self):
        """?abbreviation= restricts the listing to the matching node."""
        resp = self.client.get(
            '/organisation/node?abbreviation=BR',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'))
        )

    def test_list_nodes_filter_by_both(self):
        """Matching name AND abbreviation still returns the node."""
        resp = self.client.get(
            '/organisation/node?name=Brie&&abbreviation=BR',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'))
        )

    def test_list_nodes_filter_by_all(self):
        """A non-matching type filter excludes an otherwise-matching node."""
        resp = self.client.get(
            '/organisation/node?name=Brie&&abbreviation=BR&type=Department',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(0))

    def test_list_nodes_filter_by_both_is_and(self):
        """Filters combine with AND: mismatched name+abbreviation yields nothing."""
        resp = self.client.get(
            '/organisation/node?name=Cheese&&abbreviation=BR',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(0))

    def test_list_nodes_filter_by_is_case_insensitive(self):
        """name/abbreviation filters match case-insensitively."""
        resp = self.client.get(
            '/organisation/node?name=brie&&abbreviation=br',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'))
        )

    def test_get_nodes_ancestors(self):
        """The ancestors endpoint returns only the parent chain by default."""
        node_uuid = 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'
        resp = self.client.get(
            '/organisation/node/{}/ancestors'.format(node_uuid),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json[0],
            has_entry('id', 'f59bddcc-4494-46f8-a2c9-884030fa3087')
        )

    def test_get_nodes_ancestors_with_self(self):
        """?self=true appends the node itself after its ancestors."""
        node_uuid = 'edc9aa07-f45f-4d93-9f9c-d9d760f08019'
        resp = self.client.get(
            '/organisation/node/{}/ancestors?self=true'.format(node_uuid),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(2))
        assert_that(
            resp_json[0],
            has_entry('id', 'f59bddcc-4494-46f8-a2c9-884030fa3087')
        )
        assert_that(
            resp_json[1],
            has_entry('id', 'edc9aa07-f45f-4d93-9f9c-d9d760f08019')
        )

    def test_add_node(self):
        """A well-formed POST creates a node and echoes it back with an id."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'slug': 'wha',
                'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
                'parent_id': 'f59bddcc-4494-46f8-a2c9-884030fa3087'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(200))

        resp_json = json.loads(resp.content)

        assert_that(resp_json, has_key('id'))
        assert_that(resp_json['name'], equal_to('Edam'))
        assert_that(resp_json['abbreviation'], equal_to('ED'))
        assert_that(resp_json['type']['name'], equal_to('Thing'))

    def test_add_node_bad_json(self):
        """Malformed JSON in the body is rejected with 400."""
        resp = self.client.post(
            '/organisation/node',
            data='{"agfagd',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_missing_name(self):
        """'name' is required; omitting it yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'abbreviation': 'ED',
                'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
                'parent_id': 'f59bddcc-4494-46f8-a2c9-884030fa3087'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_missing_type(self):
        """'type_id' is required; omitting it yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'parent_id': 'f59bddcc-4494-46f8-a2c9-884030fa3087'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_non_existant_type(self):
        """An unknown type_id yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'type_id': '00000000-0000-0000-0000-000000000000',
                'parent_id': 'f59bddcc-4494-46f8-a2c9-884030fa3087'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_non_existant_parent(self):
        """An unknown parent_id yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
                'parent_id': '00000000-0000-0000-0000-000000000000'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_type_id_not_uuid(self):
        """A type_id that is not a UUID yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'type_id': 'foo',
                'parent_id': 'f59bddcc-4494-46f8-a2c9-884030fa3087'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_parent_id_not_uuid(self):
        """A parent_id that is not a UUID yields 400."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'abbreviation': 'ED',
                'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
                'parent_id': 'foo'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_node_with_no_abbr(self):
        """Creating a node without an abbreviation falls back to its name."""
        resp = self.client.post(
            '/organisation/node',
            data=json.dumps({
                'name': 'Edam',
                'slug': 'whoo',
                'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(200))

        resp_json = json.loads(resp.content)

        assert_that(resp_json, has_entry('abbreviation', 'Edam'))

    def tearDown(self):
        # Restore the flag toggled by test_add_node_without_permission so
        # other tests keep using the development users.
        settings.USE_DEVELOPMENT_USERS = True

    def test_add_node_without_permission(self):
        """Without the required signon permission the POST is refused (403)."""
        settings.USE_DEVELOPMENT_USERS = False
        signon = govuk_signon_mock(
            permissions=['signin'],
            email='some.user@digital.cabinet-office.gov.uk')

        with HTTMock(signon):
            resp = self.client.post(
                '/organisation/node',
                data=json.dumps({
                    'name': 'Edam',
                    'type_id': 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4'
                }),
                HTTP_AUTHORIZATION='Bearer correct-token',
                content_type='application/json')

        assert_that(resp.status_code, equal_to(403))
class NodeTypeViewsTestCase(TestCase):
    """HTTP-level tests for the /organisation/type list/create endpoints.

    setUp creates two node types (Thing, Department) plus two nodes so
    the listings run against a populated hierarchy.
    """

    def setUp(self):
        thing = NodeTypeFactory(
            id='ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4',
            name='Thing',
        )
        department = NodeTypeFactory(
            id='f9510fef-a879-4cf8-bcfb-9e0871579f5a',
            name='Department',
        )
        cheese = NodeFactory(
            id='f59bddcc-4494-46f8-a2c9-884030fa3087',
            name='Cheese',
            abbreviation=None,
            typeOf=department,
        )
        brie = NodeFactory(
            id='edc9aa07-f45f-4d93-9f9c-d9d760f08019',
            name='Brie',
            abbreviation='BR',
            typeOf=thing,
        )
        brie.parents.add(cheese)

    def test_list_types(self):
        """GET without filters returns both node types."""
        resp = self.client.get(
            '/organisation/type',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(2))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4'))
        )
        assert_that(
            resp_json,
            has_item(has_entry('id', 'f9510fef-a879-4cf8-bcfb-9e0871579f5a'))
        )

    def test_list_types_filter_name(self):
        """?name= restricts the listing to the matching type."""
        resp = self.client.get(
            '/organisation/type?name=Thing',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4'))
        )

    def test_list_types_filter_name_should_be_case_insensitive(self):
        """The name filter matches case-insensitively."""
        resp = self.client.get(
            '/organisation/type?name=thing',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token'
        )
        resp_json = json.loads(resp.content)

        assert_that(len(resp_json), equal_to(1))
        assert_that(
            resp_json,
            has_item(has_entry('id', 'ea72e3e1-13b8-4bf6-9ffb-7cd0d2f168d4'))
        )

    def test_add_type(self):
        """A well-formed POST creates a type and echoes it back with an id."""
        resp = self.client.post(
            '/organisation/type',
            data=json.dumps({
                'name': 'Bagency'
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(200))

        resp_json = json.loads(resp.content)

        assert_that(resp_json, has_key('id'))
        assert_that(resp_json, has_entry('name', 'Bagency'))

    def test_add_type_bad_json(self):
        """Malformed JSON in the body is rejected with 400."""
        resp = self.client.post(
            '/organisation/type',
            data='{"foo":"b',
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))

    def test_add_type_no_name(self):
        """'name' is required; an empty object yields 400."""
        resp = self.client.post(
            '/organisation/type',
            data=json.dumps({
            }),
            HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
            content_type='application/json')

        assert_that(resp.status_code, equal_to(400))
|
|
# -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2011 Stephen Tiedemann <stephen.tiedemann@googlemail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
from os import urandom
from time import time
from collections import namedtuple
import nfc.clf
class DataExchangeProtocol(object):
    """State and reporting shared by the NFC-DEP Initiator and Target roles."""

    def __init__(self, clf):
        # Inert placeholder until activate() installs a real bound-method
        # implementation. Bug fix: this is a plain instance attribute, so
        # no implicit 'self' is passed when it is called as
        # self.exchange(send_data, timeout); the original 3-parameter
        # 'lambda self, send_data, timeout' would raise TypeError.
        self.exchange = lambda send_data, timeout: None
        self.count = Counters()
        self.clf = clf
        self.gbi = ""  # general bytes received from the initiator
        self.gbt = ""  # general bytes received from the target

    @property
    def general_bytes(self):
        """The general bytes received with the ATR exchange"""
        if isinstance(self, Initiator):
            return str(self.gbt)
        if isinstance(self, Target):
            return str(self.gbi)

    @property
    def role(self):
        """Role in DEP communication, either 'Target' or 'Initiator'"""
        if isinstance(self, Initiator):
            return "Initiator"
        if isinstance(self, Target):
            return "Target"

    @property
    def stat(self):
        """One-line PDU counter summary, logged at deactivation."""
        return str(self) + " sent/rcvd " \
            "INF {count.inf_sent}/{count.inf_rcvd} " \
            "ATN {count.atn_sent}/{count.atn_rcvd} " \
            "ACK {count.ack_sent}/{count.ack_rcvd} " \
            "NAK {count.nak_sent}/{count.nak_rcvd} " \
            .format(count=self.count)
class Initiator(DataExchangeProtocol):
    def __init__(self, clf):
        """Initialise Initiator-side DEP state; fields are filled in by activate()."""
        DataExchangeProtocol.__init__(self, clf)
        self.brm = None  # bit-rate modulation ('106A', '212F', '424F')
        self.miu = None  # maximum information unit size
        self.did = None  # dep device identifier
        self.nad = None  # dep node address
        self.gbt = None  # general bytes from target
        self.pni = None  # dep packet number information
        self.rwt = None  # target response waiting time
    def __str__(self):
        # Fixed role label; used as the prefix of the 'stat' log line.
        return "NFC-DEP Initiator"
    def activate(self, timeout=None, brs=(0, 1, 2), gbi='', did=None, lr=3):
        """Activate DEP communication as Initiator.

        Returns the target's general bytes on success, or None when no
        suitable target was sensed. Prefers the reader's hardware DEP
        engine when the frontend advertises the 'NFC-DEP' capability;
        otherwise performs the ATR (and optional PSL) exchange in software.
        """
        if self.clf.capabilities.get('NFC-DEP') is True:
            log.debug("using hardware DEP implementation")
            gbt = self.clf.sense(targets=None, gbi=gbi)
            self.exchange = self._hw_dep_exchange
            return gbt

        # brs: bit rate selection, an integer or list of integers, 0 => 106A
        if not timeout: timeout = 4096 * 2**12 / 13.56E6
        if type(brs) == int: brs = (brs,)
        if did is not None: self.did = did
        assert min(brs) >= 0 and max(brs) <= 2

        # Build sense templates for the requested bit rates (Python 2
        # str.decode("hex") turns hex text into raw bytes).
        ba = lambda s: bytearray(s.decode("hex"))
        tta = {'cfg': None, 'uid': None}
        ttf = {'idm': ba("01FE"), 'pmm': None, 'sys': ba('FFFF')}
        targets = []
        for br in brs:
            if br == 0: targets.append(nfc.clf.TTA(br=106, **tta))
            elif br == 1: targets.append(nfc.clf.TTF(br=212, **ttf))
            elif br == 2: targets.append(nfc.clf.TTF(br=424, **ttf))
        target = self.clf.sense(targets)
        if target is None:
            return None
        if type(target) == nfc.clf.TTA:
            # Require DEP support advertised in SEL_RES (bit 6 of cfg[2]).
            if len(target.cfg) < 3 or target.cfg[2] & 0x40 == 0:
                return None

        self.brm = {106: '106A', 212: '212F', 424: '424F'}[target.br]
        log.info("communication with p2p target started in {0}"
                 .format(self.brm))

        # NFCID3 derives from the sensed identifier, padded to full length.
        if type(target) == nfc.clf.TTA:
            nfcid3 = target.uid + urandom(4) + '\x00\x00'
        if type(target) == nfc.clf.TTF:
            nfcid3 = target.idm + '\x00\x00'

        # ppi packs length-reduction, general-bytes and NAD flags.
        ppi = (lr << 4) | (bool(gbi) << 1) | int(bool(self.nad))
        did = int(bool(self.did))
        atr_req = ATR_REQ(nfcid3, did, 0, 0, ppi, gbi)
        if len(atr_req) > 64:
            raise nfc.clf.ProtocolError("14.6.1.1")

        try: atr_res = self.send_req_recv_res(atr_req, timeout=2**24/13.56E6)
        except nfc.clf.DigitalProtocolError: return
        if type(atr_res) != ATR_RES:
            raise nfc.clf.ProtocolError("Table-86")
        if len(atr_res) > 64:
            raise nfc.clf.ProtocolError("14.6.1.3")

        # Response waiting time and MIU come from the target's ATR_RES
        # (wt capped at 14 per the Digital spec).
        self.rwt = 4096/13.56E6 * pow(2, atr_res.wt if atr_res.wt < 15 else 14)
        self.miu = atr_res.lr - 3
        self.gbt = atr_res.gb

        # If a faster bit rate was requested than the one sensed, switch
        # with a parameter-selection (PSL) exchange.
        if (106, 212, 424).index(target.br) < max(brs):
            psl_req = PSL_REQ(self.did, max(brs) | max(brs)<<3, lr)
            try: psl_res = self.send_req_recv_res(psl_req, timeout=self.rwt)
            except nfc.clf.DigitalProtocolError: return
            if type(psl_res) != PSL_RES:
                raise nfc.clf.ProtocolError("Table-86")
            if psl_res.did != psl_req.did:
                raise nfc.clf.ProtocolError("14.7.2.2")
            self.brm = ("106A", "212F", "424F")[psl_req.dsi]
            self.clf.set_communication_mode(self.brm)
            log.info("communication with p2p target changed to {0}"
                     .format(self.brm))

        self.pni = 0
        self.exchange = self._sw_dep_exchange
        return atr_res.gb
def deactivate(self, release=True):
if self.exchange == self._sw_dep_exchange:
REQ, RES = (RLS_REQ, RLS_RES) if release else (DSL_REQ, DSL_RES)
req = REQ(self.did)
try:
res = self.send_req_recv_res(req, 0.1)
except nfc.clf.DigitalProtocolError:
pass
else:
if type(res) != RES:
raise nfc.clf.ProtocolError("Table-86")
if res.did != req.did:
raise nfc.clf.ProtocolError("14.7.2.2")
log.info(self.stat)
self.exchange = lambda self, send_data, timeout: None
return True
    def _hw_dep_exchange(self, send_data, timeout):
        """Exchange one DEP frame through the reader's hardware DEP engine."""
        log.debug("dep raw >> " + str(send_data).encode("hex"))
        send_data = bytearray(send_data)
        recv_data = self.clf.exchange(send_data, timeout)
        if recv_data is not None:
            # Python 2: convert the bytearray back to a byte string for callers.
            recv_data = str(recv_data)
            log.debug("dep raw << " + recv_data.encode("hex"))
        return recv_data
    def _sw_dep_exchange(self, send_data, timeout):
        """Exchange data with the target using the software DEP state machine.

        Fragments send_data into MIU-sized INF PDUs, honours the target's
        timeout-extension (RTOX) requests, and reassembles a chained
        response (MoreInformation) by acknowledging each chunk. Returns
        the reassembled response as a byte string.
        """
        # --- PDU constructors ------------------------------------------------
        def INF(pni, data, more, did, nad):
            # Information PDU; 'more' selects chaining (MoreInformation).
            pdu_type = (DEP_REQ.LastInformation, DEP_REQ.MoreInformation)[more]
            pfb = DEP_REQ.PFB(pdu_type, nad is not None, did is not None, pni)
            return DEP_REQ(pfb, did, nad, data)

        def ACK(pni, did, nad):
            # Positive acknowledge for a received chained chunk.
            pdu_type = DEP_REQ.PositiveAck
            pfb = DEP_REQ.PFB(pdu_type, nad is not None, did is not None, pni)
            return DEP_REQ(pfb, did, nad, data=None)

        def RTOX(rtox, did, nad):
            # Echo the target's response-timeout-extension request.
            if rtox < 1 or rtox > 59:
                raise nfc.clf.ProtocolError("14.8.4.2")
            pdu_type = DEP_REQ.TimeoutExtension
            pfb = DEP_REQ.PFB(pdu_type, nad is not None, did is not None, 0)
            return DEP_REQ(pfb, did, nad, data=bytearray([rtox]))

        #log.debug("dep raw >> " + str(send_data).encode("hex"))

        # --- send phase: one INF PDU per MIU-sized fragment -------------------
        send_data = bytearray(send_data)
        while send_data:
            data = send_data[0:self.miu]; del send_data[0:self.miu]
            req = INF(self.pni, data, bool(send_data), self.did, self.nad)
            res = self.send_dep_req_recv_dep_res(req, timeout)
            self.count.inf_sent += 1
            if res.pfb.type == DEP_RES.TimeoutExtension:
                # Grant the extension once; a second request is a protocol error.
                req = RTOX(res.data[0], self.did, self.nad)
                rwt = res.data[0] * self.rwt
                log.warning("target requested %.3f sec more time" % rwt)
                res = self.send_dep_req_recv_dep_res(req, min(timeout, rwt))
                if res.pfb.type == DEP_RES.TimeoutExtension:
                    log.error("target repeated timeout extension request")
                    raise nfc.clf.TimeoutError("repeated timeout extension")
            if res.pfb.type == DEP_RES.PositiveAck:
                self.count.ack_rcvd += 1
                # An ACK is only valid while more fragments remain to send.
                if not send_data:
                    raise nfc.clf.ProtocolError("14.12.4.3")
            if res.pfb.pni != self.pni:
                raise nfc.clf.ProtocolError("14.12.3.3")
            # Packet number counts modulo 4.
            self.pni = (self.pni + 1) & 0x3

        # --- receive phase: first chunk must be an information PDU ------------
        if (res.pfb.type != DEP_RES.LastInformation and
            res.pfb.type != DEP_RES.MoreInformation):
            raise nfc.clf.ProtocolError("14.12.4.6")
        recv_data = res.data
        self.count.inf_rcvd += 1

        # Keep ACKing while the target signals more chained chunks.
        while res.pfb.type == DEP_RES.MoreInformation:
            req = ACK(self.pni, self.did, self.nad)
            res = self.send_dep_req_recv_dep_res(req, timeout)
            self.count.ack_sent += 1
            if res.pfb.type == DEP_RES.TimeoutExtension:
                req = RTOX(res.data[0], self.did, self.nad)
                rwt = res.data[0] * self.rwt
                log.warning("target requested %.3f sec more time" % rwt)
                res = self.send_dep_req_recv_dep_res(req, min(timeout, rwt))
                if res.pfb.type == DEP_RES.TimeoutExtension:
                    log.error("target repeated timeout extension request")
                    raise nfc.clf.TimeoutError("repeated timeout extension")
            if (res.pfb.type != DEP_RES.LastInformation and
                res.pfb.type != DEP_RES.MoreInformation):
                raise nfc.clf.ProtocolError("14.12.4.7")
            if res.pfb.pni != self.pni:
                raise nfc.clf.ProtocolError("14.12.3.3")
            recv_data += res.data
            self.pni = (self.pni + 1) & 0x3
            self.count.inf_rcvd += 1

        #log.debug("dep raw << " + str(recv_data).encode("hex"))
        return str(recv_data)
def send_dep_req_recv_dep_res(self, req, timeout):
    """Send *req* and return the next usable DEP-RES, recovering with an
    attention probe (after a timeout) or a NAK retransmission request
    (after a transmission error) as mandated by NFC-IP1 section 14.12."""
    def NAK(pni, did, nad):
        # Negative acknowledge for the current packet number.
        pdu_type = DEP_REQ.NegativeAck
        pfb = DEP_REQ.PFB(pdu_type, nad != None, did != None, self.pni)
        return DEP_REQ(pfb, did, nad, data=None)
    def ATN():
        # Attention probe: checks whether the target is still alive.
        pdu_type = DEP_REQ.Attention
        pfb = DEP_REQ.PFB(pdu_type, nad=False, did=False, pni=0)
        return DEP_REQ(pfb, did=None, nad=None, data=None)
    def request_attention(self, n_retry_atn, deadline):
        # NOTE: nested helper takes self explicitly and is invoked as
        # request_attention(self, ...) rather than as a bound method.
        req = ATN()
        for i in range(n_retry_atn):
            # Each attempt is bounded by the response waiting time.
            timeout = min(self.rwt, deadline - time())
            if timeout <= 0: raise nfc.clf.TimeoutError
            try:
                res = self.send_req_recv_res(req, timeout)
            except nfc.clf.DigitalProtocolError:
                continue
            self.count.atn_sent += 1
            if res.pfb.type == DEP_RES.TimeoutExtension:
                raise nfc.clf.ProtocolError("14.12.4.4")
            if res.pfb.type != DEP_RES.Attention:
                raise nfc.clf.ProtocolError("14.12.4.2")
            self.count.atn_rcvd += 1
            return
        raise nfc.clf.ProtocolError("14.12.5.6")
    def request_retransmission(self, n_retry_nak, deadline):
        # Ask the target to resend its last response after a corrupted
        # reception; gives up after n_retry_nak attempts.
        req = NAK(self.pni, self.did, self.nad)
        for i in range(n_retry_nak):
            timeout = min(self.rwt, deadline - time())
            if timeout <= 0: raise nfc.clf.TimeoutError
            try:
                res = self.send_req_recv_res(req, timeout)
            except nfc.clf.DigitalProtocolError:
                continue
            self.count.nak_sent += 1
            if res.pfb.type == DEP_RES.TimeoutExtension:
                raise nfc.clf.ProtocolError("14.12.4.4")
            expected = (DEP_RES.LastInformation, DEP_RES.MoreInformation)
            if res.pfb.type not in expected:
                raise nfc.clf.ProtocolError("14.12.5.4")
            return res
        raise nfc.clf.ProtocolError("14.12.5.6")
    deadline = time() + timeout
    while True:
        # Bound each attempt by rwt but never past the overall deadline.
        timeout = min(self.rwt, deadline - time())
        if timeout <= 0: raise nfc.clf.TimeoutError()
        try:
            res = self.send_req_recv_res(req, timeout)
            break
        except nfc.clf.TimeoutError:
            request_attention(self, 2, deadline)
            continue
        except nfc.clf.TransmissionError:
            res = request_retransmission(self, 2, deadline)
            break
    if res.pfb.type == DEP_RES.NegativeAck:
        raise nfc.clf.ProtocolError("14.12.4.5")
    return res
def send_req_recv_res(self, req, timeout):
    """Transmit one request PDU and return the decoded response PDU.

    Raises nfc.clf.ProtocolError when the response PDU does not pair
    with the request (NFC-IP1 Table-86).
    """
    command = self.encode_frame(req)
    response = self.clf.exchange(command, timeout)
    res = self.decode_frame(response)
    # ATR-REQ pairs with ATR-RES, DEP-REQ with DEP-RES, etc. -- the
    # first three characters of the PDU name carry that pairing.
    if res.PDU_NAME[0:3] != req.PDU_NAME[0:3]:
        raise nfc.clf.ProtocolError("Table-86")
    return res
def encode_frame(self, packet):
    # Wrap a request packet with the transport framing: a leading length
    # byte (frame length including the length byte itself) and, at 106
    # kbps type A, the 0xF0 start-of-data byte.
    log.debug(">> {0}".format(packet))
    frame = packet.encode()
    # NOTE(review): Python 2 str/bytearray concatenation -- do not port
    # to Python 3 without reworking the byte handling.
    frame = chr(len(frame) + 1) + frame
    if self.brm == '106A':
        frame = '\xF0' + frame
    return frame
def decode_frame(self, frame):
    """Strip transport framing from a target response and decode it into
    the matching *_RES packet object.

    Raises nfc.clf.ProtocolError / TransmissionError for malformed
    frames (section numbers refer to the NFC-IP1 specification).
    """
    if self.brm == '106A' and frame.pop(0) != 0xF0:
        # 106 kbps type A frames carry a 0xF0 start-of-data byte.
        raise nfc.clf.ProtocolError("14.4.1.1")
    if len(frame) != frame.pop(0):
        # First byte is the frame length, including the length byte.
        raise nfc.clf.ProtocolError("14.4.1.2")
    if len(frame) < 2:
        raise nfc.clf.TransmissionError("14.4.1.3")
    if frame[0] != 0xD5 or frame[1] not in (1, 5, 7, 9, 11):
        raise nfc.clf.ProtocolError("Table-86")
    # Map the response code directly to the packet class instead of
    # constructing a class name string and eval()ing it.
    res_type = {1: ATR_RES, 5: PSL_RES, 7: DEP_RES, 9: DSL_RES, 11: RLS_RES}
    packet = res_type[frame[1]].decode(frame)
    log.debug("<< {0}".format(packet))
    return packet
class Target(DataExchangeProtocol):
    """NFC-DEP protocol implementation for the Target role: listens for
    an Initiator, answers activation (ATR-REQ), parameter selection
    (PSL-REQ), and then serves data exchange (DEP-REQ) transactions."""
    def __init__(self, clf):
        DataExchangeProtocol.__init__(self, clf)
        self.brm = None # bit-rate modulation (106A, 212F, 424F)
        self.miu = None # maximum information unit size
        self.did = None # dep device identifier
        self.nad = None # dep node address
        self.gbi = None # general bytes from initiator
        self.pni = None # dep packet number information
        self.rwt = None # target response waiting time
        self.req = None # first dep-req received in activation
    def __str__(self):
        return "NFC-DEP Target"
    def activate(self, timeout=None, brs=None, gbt='', wt=8, lr=3):
        """Activate DEP communication as Target."""
        # brs (int): bit rate selection, 0 => 106A, 1 => 212F, 2 => 424F
        # Randomize the listen period slightly when none was given.
        if not timeout: timeout = (372 + ord(urandom(1))) * 1E-3
        ba = lambda s: bytearray(s.decode("hex"))
        # NOTE(review): tta/ttf look like Type A / Type F listen
        # parameters but are not referenced below -- confirm intent.
        tta = {'cfg':ba('010040'),'uid':ba('08')+urandom(3)}
        ttf = {'idm':ba("01FE")+urandom(6),'pmm':ba("FF"*8),'sys':ba('FFFF')}
        deadline = time() + timeout
        target = nfc.clf.DEP(br={0: 106, 1: 212, 2: 424}.get(brs), gb=gbt)
        activated = self.clf.listen(target, timeout)
        if not activated: return None
        target, req_frame = activated
        self.brm = {106: '106A', 212: '212F', 424: '424F'}[target.br]
        log.debug("communication as p2p target started in {0}"
                  .format(self.brm))
        if self.clf.capabilities.get('NFC-DEP') is True:
            # The contactless hardware handles NFC-DEP framing itself.
            self.exchange = self._hw_dep_exchange
            self.req = req_frame
            return target.gb
        req = self.decode_frame(req_frame)
        # Wait until a valid (at most 64 byte) ATR-REQ arrives.
        while type(req) != ATR_REQ or len(req) > 64:
            req = self.send_res_recv_req(None, max(deadline, time()+1.0))
        atr_req = req
        if (type(target) == nfc.clf.TTF and not
                atr_req.nfcid3.startswith(target.idm)):
            raise nfc.clf.ProtocolError("14.6.2.1")
        # Three bytes of the length-reduction are consumed by framing.
        self.miu = atr_req.lr - 3
        self.did = atr_req.did if atr_req.did > 0 else None
        self.gbi = atr_req.gb
        pp = (lr << 4) | (bool(gbt) << 1) | int(bool(self.nad))
        atr_res = ATR_RES(atr_req.nfcid3, atr_req.did, 0, 0, wt, pp, gbt)
        if len(atr_res) > 64:
            raise nfc.clf.ProtocolError("14.6.1.4")
        try: req = self.send_res_recv_req(atr_res, max(deadline, time()+1.0))
        except nfc.clf.TimeoutError: return
        if type(req) == PSL_REQ and req.did == atr_req.did:
            # Initiator wants to switch bit rate and/or frame size.
            self.miu = req.lr - 3
            res = PSL_RES(did=req.did)
            self.send_res_recv_req(res, 0)
            self.brm = ("106A", "212F", "424F")[req.dsi]
            self.clf.set_communication_mode(self.brm)
            # FIXME: wait time should be shorter
            req = self.send_res_recv_req(None, max(deadline, time()+2.0))
            log.debug("communication as p2p target changed to {0}"
                      .format(self.brm))
        if type(req) == DEP_REQ and req.did == self.did:
            self.exchange = self._sw_dep_exchange
            # Response waiting time per NFC-IP1: 4096/fc * 2**wt seconds.
            self.rwt = 4096/13.56E6 * pow(2, wt)
            self.pni = 0
            self.req = req
            return atr_req.gb
        elif type(req) == DSL_REQ:
            self.send_res_recv_req(DSL_RES(self.did), 0)
        elif type(req) == RLS_REQ:
            self.send_res_recv_req(RLS_RES(self.did), 0)
    def deactivate(self):
        # Replace exchange with a no-op so later calls return None.
        if self.exchange == self._sw_dep_exchange:
            # NOTE(review): self.stat is not assigned anywhere in this
            # class -- confirm it is set by DataExchangeProtocol.
            log.info(self.stat)
        self.exchange = lambda self, send_data, timeout: None
    def _hw_dep_exchange(self, send_data, timeout):
        """Exchange one DEP transaction using hardware NFC-DEP framing."""
        if self.req is not None:
            # first packet is received in activate()
            assert send_data is None, "send_data must be None on first call"
            recv_data = self.req; self.req = None
        else:
            log.debug("dep raw >> " + str(send_data).encode("hex"))
            send_data = bytearray(send_data)
            recv_data = self.clf.exchange(send_data, timeout)
        if recv_data is not None:
            recv_data = str(recv_data)
            log.debug("dep raw << " + recv_data.encode("hex"))
        return recv_data
    def _sw_dep_exchange(self, send_data, timeout):
        """Exchange one DEP transaction in software: send *send_data*
        in MIU-sized chained chunks, then collect the chained response."""
        def INF(pni, data, more, did, nad):
            # Information PDU; `more` selects the chaining variant.
            pdu_type = (DEP_RES.LastInformation, DEP_RES.MoreInformation)[more]
            pfb = DEP_RES.PFB(pdu_type, nad is not None, did is not None, pni)
            return DEP_RES(pfb, did, nad, data)
        def ACK(pni, did, nad):
            pdu_type = DEP_RES.PositiveAck
            pfb = DEP_RES.PFB(pdu_type, nad is not None, did is not None, pni)
            return DEP_RES(pfb, did, nad, data=None)
        if send_data is not None and len(send_data) == 0:
            raise ValueError("send_data must not be empty")
        deadline = time() + timeout
        if self.req is not None:
            # first packet is received in activate()
            assert send_data is None, "send_data must be None on first call"
            req = self.req; self.req = None
        else:
            send_data = bytearray(send_data)
            while send_data:
                data = send_data[0:self.miu];
                more = len(send_data) > self.miu
                res = INF(self.pni, data, more, self.did, self.nad)
                req = self.send_dep_res_recv_dep_req(res, deadline)
                if req is None: return None
                # NOTE(review): identity compare on an int constant --
                # works in CPython (small-int caching) but is fragile.
                if more and req.pfb.type is not DEP_REQ.PositiveAck:
                    raise nfc.clf.ProtocolError("14.12.2.1")
                self.pni = (self.pni + 1) & 0x3
                if req.pfb.pni != self.pni:
                    raise nfc.clf.ProtocolError("14.12.3.3")
                del send_data[0:self.miu]
        recv_data = bytearray()
        # Keep acknowledging while the initiator chains more information.
        while req.pfb.type == DEP_REQ.MoreInformation:
            recv_data += req.data
            res = ACK(self.pni, self.did, self.nad)
            req = self.send_dep_res_recv_dep_req(res, deadline)
            if req is None: return None
            self.pni = (self.pni + 1) & 0x3
            if req.pfb.pni != self.pni:
                raise nfc.clf.ProtocolError("14.12.3.3")
        recv_data += req.data
        return str(recv_data)
    def send_timeout_extension(self, rtox):
        """Request *rtox* additional response-waiting-time units from the
        initiator; returns the granted multiplier or None."""
        def RTOX(rtox, did, nad):
            pdu_type = DEP_RES.TimeoutExtension
            pfb = DEP_RES.PFB(pdu_type, nad is not None, did is not None, 0)
            return DEP_RES(pfb, did, nad, data=bytearray([rtox]))
        res = RTOX(rtox, self.did, self.nad)
        req = self.send_dep_res_recv_dep_req(res, deadline=time()+1)
        if type(req) == DEP_REQ and req.pfb.type == DEP_REQ.TimeoutExtension:
            # Only the low six bits carry the granted multiplier.
            return req.data[0] & 0x3F
    def send_dep_res_recv_dep_req(self, dep_res, deadline):
        """Send *dep_res* and keep servicing attention / nak / deselect /
        release requests until the next real DEP-REQ (None on timeout)."""
        def ATN(did, nad):
            pdu_type = DEP_RES.Attention
            pfb = DEP_RES.PFB(pdu_type, nad is not None, did is not None, 0)
            return DEP_RES(pfb, did, nad, data=None)
        res = dep_res; dep_req = None
        while dep_req == None:
            req = self.send_res_recv_req(res, deadline)
            if req is None:
                return None
            elif req.did != self.did:
                # Not addressed to us; listen again without answering.
                res = None
            elif type(req) == DSL_REQ:
                return self.send_res_recv_req(DSL_RES(self.did), 0)
            elif type(req) == RLS_REQ:
                return self.send_res_recv_req(RLS_RES(self.did), 0)
            elif type(req) == DEP_REQ:
                if req.pfb.type == DEP_REQ.Attention:
                    self.count.atn_rcvd += 1
                    res = ATN(self.did, self.nad)
                    self.count.atn_sent += 1
                elif req.pfb.type == DEP_REQ.NegativeAck:
                    self.count.nak_rcvd += 1
                    # Retransmit the previous response.
                    res = dep_res
                elif req.pfb.type == DEP_REQ.TimeoutExtension:
                    dep_req = req
                elif req.pfb.pni == self.pni:
                    # Duplicate packet number: initiator missed our answer.
                    res = dep_res
                else:
                    dep_req = req
        return dep_req
    def send_res_recv_req(self, res, deadline):
        """Send a response frame (or just listen when *res* is None) and
        return the next decoded request, retrying on transmission errors
        until *deadline*."""
        frame = self.encode_frame(res) if res is not None else None
        while True:
            timeout = deadline - time() if deadline > time() else 0
            try:
                frame = self.clf.exchange(frame, timeout=timeout)
                return self.decode_frame(frame) if frame else None
            except nfc.clf.TransmissionError:
                # Corrupted reception: listen again without sending.
                frame = None
    def encode_frame(self, packet):
        # Prepend the length byte (and the 0xF0 start byte at 106A).
        log.debug(">> {0}".format(packet))
        frame = packet.encode()
        frame = chr(len(frame) + 1) + frame
        if self.brm == '106A':
            frame = '\xF0' + frame
        return frame
    def decode_frame(self, frame):
        # Strip framing and decode an initiator request (code 0xD4).
        if self.brm == '106A' and frame.pop(0) != 0xF0:
            raise nfc.clf.ProtocolError("14.4.1.1")
        if len(frame) != frame.pop(0):
            raise nfc.clf.ProtocolError("14.4.1.2")
        if len(frame) < 2:
            raise nfc.clf.TransmissionError("14.4.1.3")
        if frame[0] != 0xD4 or frame[1] not in (0, 4, 6, 8, 10):
            raise nfc.clf.ProtocolError("Table-86")
        req_name = {0: 'ATR', 4: 'PSL', 6: 'DEP', 8: 'DSL', 10: 'RLS'}
        # NOTE(review): eval() of a constructed class name; a direct
        # code-to-class mapping would be safer and faster.
        packet = eval(req_name[frame[1]] + "_REQ").decode(frame)
        log.debug("<< {0}".format(packet))
        return packet
#
# Data Exchange Protocol Data Units
#
class ATR_REQ_RES(object):
    """Behaviour shared by the ATR-REQ and ATR-RES activation PDUs."""
    def __str__(self):
        hexify = lambda ba: str(ba).encode("hex")
        return self.PDU_SHOW.format(
            self=self, nfcid3=hexify(self.nfcid3), gb=hexify(self.gb))

    @property
    def lr(self):
        # Length-reduction value encoded in bits 4-5 of the PP byte.
        index = (self.pp >> 4) & 0x3
        return (64, 128, 192, 254)[index]
class ATR_REQ(ATR_REQ_RES):
    """Attribute request PDU sent by the initiator during activation."""
    PDU_CODE = bytearray('\xD4\x00')
    PDU_NAME = 'ATR-REQ'
    PDU_SHOW = "{self.PDU_NAME} NFCID3={nfcid3} DID={self.did:02x} "\
        "BS={self.bs:02x} BR={self.br:02x} PP={self.pp:02x} GB={gb}"

    def __init__(self, nfcid3, did, bs, br, pp, gb):
        self.nfcid3 = nfcid3
        self.did = did
        self.bs = bs
        self.br = br
        self.pp = pp
        self.gb = gb

    def __len__(self):
        # 2 code bytes + 10 nfcid3 bytes + 4 parameter bytes + general bytes
        return 16 + len(self.gb)

    @staticmethod
    def decode(data):
        """Parse an ATR-REQ frame; returns None on a PDU code mismatch."""
        if not data.startswith(ATR_REQ.PDU_CODE):
            return None
        nfcid3 = data[2:12]
        did, bs, br, pp = data[12:16]
        # General bytes are only present when announced in PP bit 1.
        gb = data[16:] if pp & 0x02 else bytearray()
        return ATR_REQ(nfcid3, did, bs, br, pp, gb)

    def encode(self):
        """Serialize to a bytearray ready for framing."""
        frame = ATR_REQ.PDU_CODE + self.nfcid3
        frame.extend([self.did, self.bs, self.br, self.pp])
        return frame + self.gb
class ATR_RES(ATR_REQ_RES):
    """Attribute response PDU sent by the target during activation."""
    PDU_CODE = bytearray('\xD5\x01')
    PDU_NAME = 'ATR-RES'
    PDU_SHOW = "{self.PDU_NAME} NFCID3={nfcid3} DID={self.did:02x} "\
        "BS={self.bs:02x} BR={self.br:02x} TO={self.to:02x} "\
        "PP={self.pp:02x} GB={gb}"

    def __init__(self, nfcid3, did, bs, br, to, pp, gb):
        self.nfcid3 = nfcid3
        self.did = did
        self.bs = bs
        self.br = br
        self.to = to
        self.pp = pp
        self.gb = gb

    def __len__(self):
        # 2 code bytes + 10 nfcid3 bytes + 5 parameter bytes + general bytes
        return 17 + len(self.gb)

    @staticmethod
    def decode(data):
        """Parse an ATR-RES frame; returns None on a PDU code mismatch."""
        if not data.startswith(ATR_RES.PDU_CODE):
            return None
        nfcid3 = data[2:12]
        did, bs, br, to, pp = data[12:17]
        # General bytes are only present when announced in PP bit 1.
        gb = data[17:] if pp & 0x02 else bytearray()
        return ATR_RES(nfcid3, did, bs, br, to, pp, gb)

    def encode(self):
        """Serialize to a bytearray ready for framing."""
        frame = ATR_RES.PDU_CODE + self.nfcid3
        frame.extend([self.did, self.bs, self.br, self.to, self.pp])
        return frame + self.gb

    @property
    def wt(self):
        # Waiting-time index lives in the low nibble of the TO byte.
        return self.to & 0x0F
class PSL_REQ_RES(object):
    """Behaviour shared by the PSL-REQ and PSL-RES parameter PDUs."""
    def __str__(self):
        return self.PDU_SHOW.format(name=self.PDU_NAME, self=self)

    @classmethod
    def decode(cls, data):
        """Parse a frame into cls; returns None on a PDU code mismatch."""
        if not data.startswith(cls.PDU_CODE):
            return None
        try:
            return cls(*data[2:])
        except ValueError:
            # Wrong number of parameter bytes for this PDU type.
            # NOTE(review): bare ProtocolError name -- presumably imported
            # at module top (elsewhere nfc.clf.ProtocolError is used).
            raise ProtocolError(cls.PDU_SPEC)
class PSL_REQ(PSL_REQ_RES):
    """Parameter selection request (bit rate / frame size switch)."""
    PDU_CODE = bytearray('\xD4\x04')
    PDU_NAME = 'PSL-REQ'
    PDU_SPEC = 'Table-98'
    PDU_SHOW = "{name} DID={self.did} BRS={self.brs:02x}, FSL={self.fsl:02x}"

    def __init__(self, did, brs, fsl):
        # A None/zero device identifier is normalized to 0 on the wire.
        self.did = did if did else 0
        self.brs = brs
        self.fsl = fsl

    def encode(self):
        """Serialize to a bytearray ready for framing."""
        return PSL_REQ.PDU_CODE + bytearray([self.did, self.brs, self.fsl])

    @property
    def dsi(self):
        # Divisor-send index: bits 3-5 of BRS.
        return self.brs >> 3 & 0x07

    @property
    def dri(self):
        # Divisor-receive index: bits 0-2 of BRS.
        return self.brs & 0x07

    @property
    def lr(self):
        # Length-reduction value selected by the two low FSL bits.
        return (64, 128, 192, 254)[self.fsl & 0x03]
class PSL_RES(PSL_REQ_RES):
    """Parameter selection response (acknowledges a PSL-REQ)."""
    PDU_CODE = bytearray('\xD5\x05')
    PDU_NAME = 'PSL-RES'
    PDU_SPEC = 'Table-102'
    PDU_SHOW = "{name} DID={self.did}"

    def __init__(self, did):
        self.did = did

    def encode(self):
        """Serialize to a bytearray ready for framing."""
        return PSL_RES.PDU_CODE + bytearray([self.did])
class DEP_REQ_RES(object):
    """Behaviour shared by the DEP-REQ and DEP-RES transport PDUs."""
    PDU_SHOW = "{self.PDU_NAME} {self.pfb} DID={self.did} "\
        "NAD={self.nad} DATA={data}"
    # Packet flow byte: pdu type, nad/did presence flags, packet number.
    PFB = namedtuple("PFB", "type, nad, did, pni")
    # PFB type values.
    LastInformation, MoreInformation, PositiveAck, NegativeAck,\
        Attention, TimeoutExtension = (0, 1, 4, 5, 8, 9)

    def __init__(self, pfb, did, nad, data):
        self.pfb = pfb
        self.did = did
        self.nad = nad
        # Normalize a missing payload to an empty bytearray.
        self.data = bytearray() if data is None else data

    def __str__(self):
        hex_data = str(self.data).encode("hex")
        return self.PDU_SHOW.format(self=self, data=hex_data)

    @classmethod
    def decode(cls, data):
        """Parse a DEP frame in place (mutates *data*); returns None on a
        PDU code mismatch."""
        if not data.startswith(cls.PDU_CODE):
            return None
        del data[0:2]
        try:
            pfb = data.pop(0)
            pfb = cls.PFB(pfb >> 4, bool(pfb & 8), bool(pfb & 4), pfb & 3)
            did = data.pop(0) if pfb.did else None
            nad = data.pop(0) if pfb.nad else None
        except IndexError:
            raise ProtocolError(cls.PDU_SPEC)
        # Whatever remains of the mutated buffer is the payload.
        return cls(pfb, did, nad, data)

    def encode(self):
        """Serialize to a bytearray ready for framing."""
        pfb = self.pfb
        pfb = (pfb.type << 4) | (pfb.nad << 3) | (pfb.did << 2) | (pfb.pni)
        data = self.PDU_CODE + chr(pfb)
        if self.pfb.did: data.append(self.did)
        if self.pfb.nad: data.append(self.nad)
        return data + self.data
class DEP_REQ(DEP_REQ_RES):
    # Data exchange protocol request (initiator -> target).
    PDU_CODE = bytearray('\xD4\x06')
    PDU_NAME = 'DEP-REQ'
    PDU_SPEC = 'Table-103'
class DEP_RES(DEP_REQ_RES):
    # Data exchange protocol response (target -> initiator).
    PDU_CODE = bytearray('\xD5\x07')
    PDU_NAME = 'DEP-RES'
    PDU_SPEC = 'Table-104'
class DSL_REQ_RES(object):
    """Behaviour shared by the deselect (and release) request/response
    PDUs, which carry nothing but an optional device identifier."""
    def __init__(self, did):
        self.did = did

    def __str__(self):
        return "{0} DID={1}".format(self.PDU_NAME, self.did)

    @classmethod
    def decode(cls, data):
        """Parse a frame into cls; returns None on a PDU code mismatch."""
        if not data.startswith(cls.PDU_CODE):
            return None
        if len(data) > 3:
            raise ProtocolError(cls.PDU_SPEC)
        # The device identifier byte is optional.
        return cls(data[2] if len(data) == 3 else None)

    def encode(self):
        """Serialize; the DID byte is omitted when not set."""
        return self.PDU_CODE + ('' if self.did is None else chr(self.did))
class DSL_REQ(DSL_REQ_RES):
    # Deselect request (initiator -> target).
    PDU_CODE = bytearray('\xD4\x08')
    PDU_NAME = 'DSL-REQ'
    PDU_SPEC = 'Table-110'
class DSL_RES(DSL_REQ_RES):
    # Deselect response (target -> initiator).
    PDU_CODE = bytearray('\xD5\x09')
    PDU_NAME = 'DSL-RES'
    PDU_SPEC = 'Table-111'
class RLS_REQ_RES(DSL_REQ_RES):
    # Release PDUs share the deselect PDU layout; only codes/names differ.
    pass
class RLS_REQ(RLS_REQ_RES):
    # Release request (initiator -> target).
    PDU_CODE = bytearray('\xD4\x0A')
    PDU_NAME = 'RLS-REQ'
    PDU_SPEC = 'Table-112'
class RLS_RES(RLS_REQ_RES):
    # Release response (target -> initiator).
    PDU_CODE = bytearray('\xD5\x0B')
    PDU_NAME = 'RLS-RES'
    PDU_SPEC = 'Table-113'
class Counters:
    # PDU statistics for one DEP session. The class attributes provide
    # zero defaults; augmented assignment on an instance (for example
    # ``count.inf_sent += 1``) creates a per-instance attribute on first
    # use, so the class-level values stay at zero.
    inf_sent = 0  # information PDUs sent
    inf_rcvd = 0  # information PDUs received
    atn_sent = 0  # attention PDUs sent
    atn_rcvd = 0  # attention PDUs received
    ack_sent = 0  # positive acknowledgements sent
    ack_rcvd = 0  # positive acknowledgements received
    nak_sent = 0  # negative acknowledgements sent
    nak_rcvd = 0  # negative acknowledgements received
def fatal_error(message, retval=None):
    # Log an error and pass *retval* back, so callers can write
    # ``return fatal_error("...", False)`` in a single line.
    log.error(message)
    return retval
|
|
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple
from annotypes import Array, FrozenOrderedDict
from .concurrency import RLock
from .loggable import Loggable
from .request import Subscribe, Unsubscribe
from .response import Response
if TYPE_CHECKING:
from .models import BlockModel
Callback = Callable[[Response], None]
CallbackResponses = List[Tuple[Callback, Response]]
SubscriptionKeys = Dict[Tuple[Callback, int], Subscribe]
class DummyNotifier:
    """No-op stand-in for a Notifier: squashing is a do-nothing context
    and change/delete registrations are silently discarded."""

    @property  # type: ignore
    @contextmanager
    def changes_squashed(self):
        # Nothing to squash -- just provide an empty context.
        yield

    def add_squashed_change(self, path: List[str], data: Any) -> None:
        """Ignore a change notification."""

    def add_squashed_delete(self, path: List[str]) -> None:
        """Ignore a delete notification."""
def freeze(o):
    # Cheaper than a subclass check, will find Models for us and freeze
    # them into dicts
    if hasattr(o, "notifier"):
        # Model-like object: serialize its typeid plus every call type.
        items = (("typeid", o.typeid),)
        items += tuple((k, freeze(getattr(o, k))) for k in o.call_types)
        return FrozenOrderedDict(items)
    if isinstance(o, dict):
        # Recurse down in case there are any models down there
        return FrozenOrderedDict(tuple((k, freeze(v)) for k, v in o.items()))
    if o.__class__ is Array and hasattr(o.typ, "notifier"):
        # Recurse down only if the type suggests it has a model
        return [freeze(v) for v in o.seq]
    return o
class Notifier(Loggable):
    """Object that can service callbacks on given endpoints"""

    def __init__(self, mri: str, lock: RLock, block: "BlockModel") -> None:
        self.set_logger(mri=mri)
        # Subscription tree rooted at the block itself
        self._tree = NotifierNode(block)
        self._lock = lock
        # Incremented every time we do with changes_squashed
        self._squashed_count = 0
        self._squashed_changes: List[List] = []
        # Maps generate_key() results to the originating Subscribe
        # requests so unsubscribes can be matched up
        self._subscription_keys: SubscriptionKeys = {}

    def handle_subscribe(self, request: Subscribe) -> "CallbackResponses":
        """Handle a Subscribe request from outside. Called with lock taken"""
        # path[0] is the block name; the tree starts below it
        ret = self._tree.handle_subscribe(request, request.path[1:])
        self._subscription_keys[request.generate_key()] = request
        return ret

    def handle_unsubscribe(self, request: Unsubscribe) -> "CallbackResponses":
        """Handle a Unsubscribe request from outside. Called with lock taken"""
        subscribe = self._subscription_keys.pop(request.generate_key())
        ret = self._tree.handle_unsubscribe(subscribe, subscribe.path[1:])
        return ret

    @property
    def changes_squashed(self) -> "Notifier":
        """Context manager to allow multiple calls to notify_change() to be
        made and all changes squashed into one consistent set. E.g:

        with notifier.changes_squashed:
            attr.set_value(1)
            attr.set_alarm(MINOR)
        """
        # The Notifier itself implements __enter__/__exit__
        return self

    def add_squashed_change(self, path: List[str], data: Any) -> None:
        """Register a squashed change to a particular path

        Args:
            path (list): The path of what has changed, relative from Block
            data (object): The new data
        """
        assert self._squashed_count, "Called while not squashing changes"
        self._squashed_changes.append([path[1:], data])

    def add_squashed_delete(self, path: List[str]) -> None:
        """Register a squashed deletion of a particular path

        Args:
            path (list): The path of what has changed, relative from Block
        """
        assert self._squashed_count, "Called while not squashing changes"
        self._squashed_changes.append([path[1:]])

    def __enter__(self):
        """So we can use this as a context manager for squashing changes"""
        self._lock.acquire()
        # Count nesting so only the outermost exit publishes changes
        self._squashed_count += 1

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        """So we can use this as a context manager for squashing changes"""
        responses = []
        try:
            self._squashed_count -= 1
            if self._squashed_count == 0:
                # Outermost with-block finished: take the accumulated
                # changes and publish them through the tree
                changes = self._squashed_changes
                self._squashed_changes = []
                # TODO: squash intermediate deltas here?
                responses += self._tree.notify_changes(changes)
        finally:
            self._lock.release()
        # Callbacks run after the lock is released so a callback that
        # re-enters the notifier cannot deadlock
        self._callback_responses(responses)

    def _callback_responses(self, responses: "CallbackResponses") -> None:
        # Deliver each response; a failing callback is logged, then the
        # exception is re-raised to the caller
        for cb, response in responses:
            try:
                cb(response)
            except Exception:
                self.log.exception(f"Exception notifying {response}")
                raise
raise
class NotifierNode:
    """One node in the subscription tree, mirroring one endpoint of the
    Block structure; tracks delta and update subscribers at its path."""

    # Define slots so it uses less resources to make these
    __slots__ = ["delta_requests", "update_requests", "children", "parent", "data"]

    def __init__(self, data: Any, parent: "NotifierNode" = None) -> None:
        # Subscribers that want deltas (lists of changes) at this path
        self.delta_requests: List[Subscribe] = []
        # Subscribers that want the full frozen value at this path
        self.update_requests: List[Subscribe] = []
        self.children: Dict[str, NotifierNode] = {}
        self.parent = parent
        self.data = data

    def notify_changes(self, changes: List[List]) -> "CallbackResponses":
        """Set our data and notify anyone listening

        Args:
            changes (list): [[path, optional data]] where path is the path to
                what has changed, and data is the unserialized object that has
                changed

        Returns:
            list: [(callback, Response)] that need to be called
        """
        ret = []
        child_changes: Dict[str, List] = {}
        for change in changes:
            # Add any changes that our children need to know about
            self._add_child_change(change, child_changes)
        # If we have update subscribers, freeze at this level
        if self.update_requests:
            frozen = freeze(self.data)
            for request in self.update_requests:
                ret.append(request.update_response(frozen))
        # If we have delta subscribers, freeze the change value
        if self.delta_requests:
            # NOTE: mutates the caller's change lists in place so the
            # frozen values are shared by all delta subscribers
            for change in changes:
                change[-1] = freeze(change[-1])
            for request in self.delta_requests:
                ret.append(request.delta_response(changes))
        # Now notify our children. Use a distinct loop variable so the
        # `changes` parameter is not shadowed/rebound by the iteration.
        for name, child_change_list in child_changes.items():
            ret += self.children[name].notify_changes(child_change_list)
        return ret

    def _add_child_change(self, change: List, child_changes: Dict[str, List]) -> None:
        # Route one change: either strip the leading path element and hand
        # it to the named child, or apply it to our own data and fan out
        # the resulting per-child changes.
        path = change[0]
        if path:
            # This is for one of our children
            name = path[0]
            if name in self.children:
                if len(change) == 2:
                    child_change = [path[1:], change[1]]
                else:
                    # Deletion changes carry no data element
                    child_change = [path[1:]]
                child_changes.setdefault(name, []).append(child_change)
        else:
            # This is for us
            if len(change) == 2:
                child_change_dict = self._update_data(change[1])
            else:
                child_change_dict = self._update_data(None)
            for name, child_change in child_change_dict.items():
                child_changes.setdefault(name, []).append(child_change)

    def _update_data(self, data: Any) -> Dict[str, List]:
        """Set our data and notify any subscribers of children what has changed

        Args:
            data (object): The new data

        Returns:
            dict: {child_name: [path_list, optional child_data]} of the change
                that needs to be passed to a child as a result of this
        """
        self.data = data
        child_change_dict: Dict[str, List] = {}
        # Reflect change of data to children
        for name in self.children:
            child_data = getattr(data, name, None)
            if child_data is None:
                # Deletion
                child_change_dict[name] = [[]]
            else:
                # Change
                child_change_dict[name] = [[], child_data]
        return child_change_dict

    def handle_subscribe(
        self, request: Subscribe, path: List[str]
    ) -> "CallbackResponses":
        """Add to the list of request to notify, and notify the initial value
        of the data held

        Args:
            request (Subscribe): The subscribe request
            path (list): The relative path from ourself

        Returns:
            list: [(callback, Response)] that need to be called
        """
        ret = []
        if path:
            # Recurse down, creating the child node on demand
            name = path[0]
            if name not in self.children:
                self.children[name] = NotifierNode(getattr(self.data, name, None), self)
            ret += self.children[name].handle_subscribe(request, path[1:])
        else:
            # This is for us: answer immediately with the current value
            frozen = freeze(self.data)
            if request.delta:
                self.delta_requests.append(request)
                ret.append(request.delta_response([[[], frozen]]))
            else:
                self.update_requests.append(request)
                ret.append(request.update_response(frozen))
        return ret

    def handle_unsubscribe(
        self, request: Subscribe, path: List[str]
    ) -> "CallbackResponses":
        """Remove from the notifier list and send a return

        Args:
            request (Subscribe): The original subscribe request
            path (list): The relative path from ourself

        Returns:
            list: [(callback, Response)] that need to be called
        """
        ret = []
        if path:
            # Recurse down, then prune the child if nothing uses it anymore
            name = path[0]
            child = self.children[name]
            ret += child.handle_unsubscribe(request, path[1:])
            if (
                not child.children
                and not child.update_requests
                and not child.delta_requests
            ):
                del self.children[name]
        else:
            # This is for us
            if request in self.update_requests:
                self.update_requests.remove(request)
            else:
                self.delta_requests.remove(request)
            ret.append(request.return_response())
        return ret
|
|
from __future__ import unicode_literals
from operator import attrgetter
from django.test import TestCase
from .models import Person
class RecursiveM2MTests(TestCase):
    """Exercise recursive (self-referential) many-to-many relations on
    Person: the symmetrical ``friends`` field and the asymmetrical
    ``idols``/``stalkers`` pair."""

    def _assert_names(self, qs, names, ordered=True):
        # Compare a Person queryset against the expected names. An empty
        # expectation uses the plain list comparison (no transform),
        # exactly as the original inline checks did.
        if names:
            self.assertQuerysetEqual(qs, names, attrgetter("name"),
                                     ordered=ordered)
        else:
            self.assertQuerysetEqual(qs, [])

    def test_recursive_m2m(self):
        a, b, c, d = [
            Person.objects.create(name=name)
            for name in ["Anne", "Bill", "Chuck", "David"]
        ]

        # --- symmetrical friends field ---
        # Anne is friends with Bill and Chuck (direction of field
        # definition); David with Anne and Chuck (reverse direction).
        a.friends.add(b, c)
        d.friends.add(a, c)
        self._assert_names(a.friends.all(),
                           ["Bill", "Chuck", "David"], ordered=False)
        self._assert_names(b.friends.all(), ["Anne"])
        self._assert_names(c.friends.all(), ["Anne", "David"], ordered=False)
        self._assert_names(d.friends.all(), ["Anne", "Chuck"], ordered=False)

        # Bill is already friends with Anne - adding Anne again in the
        # reverse direction must be a no-op.
        b.friends.add(a)
        self._assert_names(a.friends.all(),
                           ["Bill", "Chuck", "David"], ordered=False)
        self._assert_names(b.friends.all(), ["Anne"])

        # Removing from one side removes the symmetrical reverse link too.
        b.friends.remove(a)
        self._assert_names(a.friends.all(), ["Chuck", "David"], ordered=False)
        self._assert_names(b.friends.all(), [])

        # clear() drops the reverse relationships as well.
        a.friends.clear()
        self._assert_names(a.friends.all(), [])
        self._assert_names(c.friends.all(), ["David"])
        self._assert_names(d.friends.all(), ["Chuck"])

        # --- asymmetrical idols/stalkers pair ---
        # Anne idolizes Bill and Chuck; Bill idolizes Anne right back;
        # David is idolized by Anne and Chuck (added in reverse).
        a.idols.add(b, c)
        b.idols.add(a)
        d.stalkers.add(a, c)
        self._assert_names(a.idols.all(),
                           ["Bill", "Chuck", "David"], ordered=False)
        self._assert_names(a.stalkers.all(), ["Bill"])
        self._assert_names(b.idols.all(), ["Anne"])
        self._assert_names(b.stalkers.all(), ["Anne"])
        self._assert_names(c.idols.all(), ["David"])
        self._assert_names(c.stalkers.all(), ["Anne"])
        self._assert_names(d.idols.all(), [])
        self._assert_names(d.stalkers.all(), ["Anne", "Chuck"], ordered=False)

        # Bill is already stalked by Anne - re-adding via the reverse
        # manager must be a no-op.
        b.stalkers.add(a)
        self._assert_names(a.idols.all(),
                           ["Bill", "Chuck", "David"], ordered=False)
        self._assert_names(a.stalkers.all(), ["Bill"])
        self._assert_names(b.idols.all(), ["Anne"])
        self._assert_names(b.stalkers.all(), ["Anne"])

        # Removing via the reverse manager only affects that direction.
        b.stalkers.remove(a)
        self._assert_names(a.idols.all(), ["Chuck", "David"], ordered=False)
        self._assert_names(a.stalkers.all(), ["Bill"])
        self._assert_names(b.idols.all(), ["Anne"])
        self._assert_names(b.stalkers.all(), [])

        # Clearing idols drops the reverse (stalkers) links too.
        a.idols.clear()
        self._assert_names(a.idols.all(), [])
        self._assert_names(c.stalkers.all(), [])
        self._assert_names(d.stalkers.all(), ["Chuck"])
|
|
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Math functions for ArrayFire
"""
from .library import *
from .array import *
from .bcast import _bcast_var
from .util import _is_number
def _arith_binary_func(lhs, rhs, c_func):
    """Dispatch a binary arithmetic backend call.

    When one operand is a scalar it is promoted to a constant array
    matching the other operand's dimensions and implicit dtype before
    calling `c_func`.
    """
    out = Array()
    is_left_array = isinstance(lhs, Array)
    is_right_array = isinstance(rhs, Array)

    if not (is_left_array or is_right_array):
        raise TypeError("Atleast one input needs to be of type arrayfire.array")

    if is_left_array and is_right_array:
        # Both operands are arrays: call the backend directly.
        safe_call(c_func(ct.pointer(out.arr), lhs.arr, rhs.arr, _bcast_var.get()))
        return out

    if _is_number(rhs):
        # rhs is a scalar: build a constant array shaped like lhs.
        ldims = dim4_to_tuple(lhs.dims())
        rty = implicit_dtype(rhs, lhs.type())
        other = Array()
        other.arr = constant_array(rhs, ldims[0], ldims[1], ldims[2], ldims[3], rty)
        safe_call(c_func(ct.pointer(out.arr), lhs.arr, other.arr, _bcast_var.get()))
    else:
        # lhs is a scalar: build a constant array shaped like rhs.
        rdims = dim4_to_tuple(rhs.dims())
        lty = implicit_dtype(lhs, rhs.type())
        other = Array()
        other.arr = constant_array(lhs, rdims[0], rdims[1], rdims[2], rdims[3], lty)
        safe_call(c_func(ct.pointer(out.arr), other.arr, rhs.arr, _bcast_var.get()))
    return out
def _arith_unary_func(a, c_func):
    """Dispatch a unary arithmetic backend call on array `a`."""
    result = Array()
    safe_call(c_func(ct.pointer(result.arr), a.arr))
    return result
def cast(a, dtype):
    """
    Convert an array to another element type.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.
    dtype: af.Dtype
        Target type; one of:
        - Dtype.f32 for float
        - Dtype.f64 for double
        - Dtype.b8 for bool
        - Dtype.u8 for unsigned char
        - Dtype.s32 for signed 32 bit integer
        - Dtype.u32 for unsigned 32 bit integer
        - Dtype.s64 for signed 64 bit integer
        - Dtype.u64 for unsigned 64 bit integer
        - Dtype.c32 for 32 bit complex number
        - Dtype.c64 for 64 bit complex number

    Returns
    --------
    out : af.Array
        array containing the values from `a` after converting to `dtype`.
    """
    result = Array()
    safe_call(backend.get().af_cast(ct.pointer(result.arr), a.arr, dtype.value))
    return result
def minof(lhs, rhs):
    """
    Element-wise minimum of two inputs.

    Parameters
    ----------
    lhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.

    Returns
    --------
    out : af.Array
          array holding the smaller of the two inputs at each location.

    Note
    -------
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    elementwise_min = backend.get().af_minof
    return _arith_binary_func(lhs, rhs, elementwise_min)
def maxof(lhs, rhs):
    """
    Element-wise maximum of two inputs.

    Parameters
    ----------
    lhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.

    Returns
    --------
    out : af.Array
          array holding the larger of the two inputs at each location.

    Note
    -------
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    elementwise_max = backend.get().af_maxof
    return _arith_binary_func(lhs, rhs, elementwise_max)
def rem(lhs, rhs):
    """
    Element-wise remainder of division.

    Parameters
    ----------
    lhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
          Multi dimensional arrayfire array or a scalar number.

    Returns
    --------
    out : af.Array
          Remainders after dividing each value of `lhs` by those in `rhs`.

    Note
    -------
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    remainder_impl = backend.get().af_rem
    return _arith_binary_func(lhs, rhs, remainder_impl)
def abs(a):
    """
    Element-wise absolute value.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    --------
    out : af.Array
        Array holding `|x|` for every element `x` of `a`.
    """
    # Intentionally shadows the builtin abs as part of the module API.
    unary_impl = backend.get().af_abs
    return _arith_unary_func(a, unary_impl)
def arg(a):
    """
    Element-wise phase angle (theta) in polar co-ordinates.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    --------
    out : af.Array
        Array holding the theta value of each input element.
    """
    unary_impl = backend.get().af_arg
    return _arith_unary_func(a, unary_impl)
def sign(a):
"""
Find the sign of the inputs.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array.
Returns
--------
out : af.Array
array containing -1 for negative values, 1 otherwise.
"""
return _arith_unary_func(a, backend.get().af_sign)
def round(a):
    """
    Round the values to nearest integer.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the values rounded to nearest integer.
    """
    return _arith_unary_func(a, backend.get().af_round)

def trunc(a):
    """
    Round the values towards zero.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the truncated values.
    """
    return _arith_unary_func(a, backend.get().af_trunc)

def floor(a):
    """
    Round the values towards a smaller integer.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the floored values.
    """
    return _arith_unary_func(a, backend.get().af_floor)

def ceil(a):
    """
    Round the values towards a bigger integer.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the ceiled values.
    """
    return _arith_unary_func(a, backend.get().af_ceil)
def hypot(lhs, rhs):
    """
    Find the length of the hypotenuse of two inputs.

    Parameters
    ----------
    lhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.

    Returns
    -------
    out : af.Array
        Contains the value of `sqrt(lhs**2 + rhs**2)`.

    Note
    ----
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    return _arith_binary_func(lhs, rhs, backend.get().af_hypot)
def sin(a):
    """
    Sine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the sine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_sin)

def cos(a):
    """
    Cosine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the cosine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_cos)

def tan(a):
    """
    Tangent of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the tangent of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_tan)

def asin(a):
    """
    Arc Sine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc sine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_asin)

def acos(a):
    """
    Arc Cosine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc cosine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_acos)

def atan(a):
    """
    Arc Tangent of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc tangent of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_atan)
def atan2(lhs, rhs):
    """
    Find the arc tan using two values.

    Parameters
    ----------
    lhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.

    Returns
    -------
    out : af.Array
        Contains the arc tan values where:
        - `lhs` contains the sine values.
        - `rhs` contains the cosine values.

    Note
    ----
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    return _arith_binary_func(lhs, rhs, backend.get().af_atan2)

def cplx(lhs, rhs=None):
    """
    Create a complex array from real inputs.

    Parameters
    ----------
    lhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.
    rhs : optional: af.Array or scalar. default: None.
        Multi dimensional arrayfire array or a scalar number.

    Returns
    -------
    out : af.Array
        Contains complex values whose
        - real values contain values from `lhs`
        - imaginary values contain values from `rhs` (0 if `rhs` is None)

    Note
    ----
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    # One-argument form maps to af_cplx (imaginary part zero); the
    # two-argument form maps to af_cplx2.
    if rhs is None:
        return _arith_unary_func(lhs, backend.get().af_cplx)
    else:
        return _arith_binary_func(lhs, rhs, backend.get().af_cplx2)
def real(a):
    """
    Find the real values of the input.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the real values from `a`.
    """
    return _arith_unary_func(a, backend.get().af_real)

def imag(a):
    """
    Find the imaginary values of the input.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the imaginary values from `a`.
    """
    return _arith_unary_func(a, backend.get().af_imag)

def conjg(a):
    """
    Find the complex conjugate values of the input.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing complex conjugate values from `a`.
    """
    return _arith_unary_func(a, backend.get().af_conjg)
def sinh(a):
    """
    Hyperbolic Sine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the hyperbolic sine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_sinh)

def cosh(a):
    """
    Hyperbolic Cosine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the hyperbolic cosine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_cosh)

def tanh(a):
    """
    Hyperbolic Tangent of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the hyperbolic tangent of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_tanh)

def asinh(a):
    """
    Arc Hyperbolic Sine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc hyperbolic sine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_asinh)

def acosh(a):
    """
    Arc Hyperbolic Cosine of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc hyperbolic cosine of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_acosh)

def atanh(a):
    """
    Arc Hyperbolic Tangent of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the arc hyperbolic tangent of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_atanh)
def root(lhs, rhs):
    """
    Find the root values of two inputs at each location.

    Parameters
    ----------
    lhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.

    Returns
    -------
    out : af.Array
        array containing the value of `lhs ** (1/rhs)`

    Note
    ----
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    return _arith_binary_func(lhs, rhs, backend.get().af_root)

# NOTE: `pow` intentionally shadows the Python builtin to mirror the math name.
def pow(lhs, rhs):
    """
    Find the power of two inputs at each location.

    Parameters
    ----------
    lhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.
    rhs : af.Array or scalar
        Multi dimensional arrayfire array or a scalar number.

    Returns
    -------
    out : af.Array
        array containing the value of `lhs ** (rhs)`

    Note
    ----
    - At least one of `lhs` and `rhs` needs to be af.Array.
    - If `lhs` and `rhs` are both af.Array, they must be of same size.
    """
    return _arith_binary_func(lhs, rhs, backend.get().af_pow)
def pow2(a):
    """
    Raise 2 to the power of each element in input.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array where each element is 2 raised to power of the corresponding value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_pow2)

def exp(a):
    """
    Exponential of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the exponential of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_exp)

def expm1(a):
    """
    Exponential of each element in the array minus 1.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the value of `exp(a) - 1` for each element of `a`.

    Note
    ----
    - `a` must not be complex.
    - This function provides a more stable result for small values of `a`.
    """
    return _arith_unary_func(a, backend.get().af_expm1)

def erf(a):
    """
    Error function of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the error function of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_erf)

def erfc(a):
    """
    Complementary error function of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the complementary error function of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_erfc)
def log(a):
    """
    Natural logarithm of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the natural logarithm of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_log)

def log1p(a):
    """
    Natural logarithm of 1 plus each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the values of `log(1 + a)`

    Note
    ----
    - `a` must not be complex.
    - This function provides a more stable result for small values of `a`.
    """
    return _arith_unary_func(a, backend.get().af_log1p)

def log10(a):
    """
    Logarithm base 10 of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the logarithm base 10 of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_log10)

def log2(a):
    """
    Logarithm base 2 of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the logarithm base 2 of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_log2)

def sqrt(a):
    """
    Square root of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the square root of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_sqrt)

def cbrt(a):
    """
    Cube root of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the cube root of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_cbrt)
def factorial(a):
    """
    factorial of each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the factorial of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_factorial)

def tgamma(a):
    """
    Performs the gamma function for each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the output of gamma function of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_tgamma)

def lgamma(a):
    """
    Performs the logarithm of gamma function for each element in the array.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the output of logarithm of gamma function of each value from `a`.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_lgamma)
def iszero(a):
    """
    Check if each element of the input is zero.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the output after checking if each value of `a` is 0.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_iszero)

def isinf(a):
    """
    Check if each element of the input is infinity.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the output after checking if each value of `a` is infinite.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_isinf)

def isnan(a):
    """
    Check if each element of the input is NaN.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing the output after checking if each value of `a` is NaN.

    Note
    ----
    `a` must not be complex.
    """
    return _arith_unary_func(a, backend.get().af_isnan)
|
|
"""
This platform enables the possibility to control a MQTT alarm.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.mqtt/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components import mqtt
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
CONF_CODE, CONF_DEVICE, CONF_NAME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED,
STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this MQTT alarm platform.
CONF_CODE_ARM_REQUIRED = 'code_arm_required'
CONF_PAYLOAD_DISARM = 'payload_disarm'
CONF_PAYLOAD_ARM_HOME = 'payload_arm_home'
CONF_PAYLOAD_ARM_AWAY = 'payload_arm_away'
CONF_PAYLOAD_ARM_NIGHT = 'payload_arm_night'

# Default MQTT payloads published for each alarm command.
DEFAULT_ARM_NIGHT = 'ARM_NIGHT'
DEFAULT_ARM_AWAY = 'ARM_AWAY'
DEFAULT_ARM_HOME = 'ARM_HOME'
DEFAULT_DISARM = 'DISARM'
DEFAULT_NAME = 'MQTT Alarm'

DEPENDENCIES = ['mqtt']

# Validated configuration schema, used both for configuration.yaml setup and
# for payloads arriving via MQTT discovery.
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
    vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
    vol.Required(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_CODE): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PAYLOAD_ARM_NIGHT, default=DEFAULT_ARM_NIGHT): cv.string,
    vol.Optional(CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY): cv.string,
    vol.Optional(CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME): cv.string,
    vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
    vol.Optional(CONF_UNIQUE_ID): cv.string,
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
    vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up MQTT alarm control panel through configuration.yaml."""
    # discovery_info is unused here; discovery goes through async_setup_entry.
    await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT alarm control panel dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add an MQTT alarm control panel."""
        # Initialize before the try-block: if pop() or schema validation
        # raises before discovery_hash is assigned, the except clause would
        # otherwise hit a NameError that masks the original exception.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, config_entry,
                                      discovery_hash)
        except Exception:
            # Clean up the half-registered discovery entry so the device can
            # be re-discovered later, then surface the original error.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(alarm.DOMAIN, 'mqtt'),
        async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
                              discovery_hash=None):
    """Set up the MQTT Alarm Control Panel platform.

    config_entry and discovery_hash are only set when the entity is created
    via MQTT discovery; they are None for configuration.yaml setup.
    """
    async_add_entities([MqttAlarm(config, config_entry, discovery_hash)])
class MqttAlarm(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
                MqttEntityDeviceInfo, alarm.AlarmControlPanel):
    """Representation of a MQTT alarm status."""

    def __init__(self, config, config_entry, discovery_hash):
        """Init the MQTT Alarm Control Panel.

        config: validated PLATFORM_SCHEMA configuration.
        config_entry: the config entry when set up via discovery, else None.
        discovery_hash: identifier used to track discovery updates, else None.
        """
        # Last alarm state string received on the state topic.
        self._state = None
        self._config = config
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # Subscription bookkeeping handle used by the subscription helper.
        self._sub_state = None
        device_config = config.get(CONF_DEVICE)

        # Each mixin takes different arguments, so they are initialized
        # explicitly rather than via a cooperative super() chain.
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash,
                                     self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)

    async def async_added_to_hass(self):
        """Subscribe mqtt events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()

    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the new payload, then re-apply every configurable
        # aspect (attributes, availability, device info, subscriptions).
        config = PLATFORM_SCHEMA(discovery_payload)
        self._config = config
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()

    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        @callback
        def message_received(msg):
            """Run when new MQTT message has been received."""
            # Ignore payloads that are not one of the known alarm states.
            if msg.payload not in (
                    STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME,
                    STATE_ALARM_ARMED_AWAY,
                    STATE_ALARM_ARMED_NIGHT,
                    STATE_ALARM_PENDING,
                    STATE_ALARM_TRIGGERED):
                _LOGGER.warning("Received unexpected payload: %s", msg.payload)
                return
            self._state = msg.payload
            self.async_write_ha_state()

        # async_subscribe_topics diffs against the previous _sub_state, so
        # calling this repeatedly (e.g. after discovery updates) is safe.
        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state,
            {'state_topic': {'topic': self._config.get(CONF_STATE_TOPIC),
                             'msg_callback': message_received,
                             'qos': self._config.get(CONF_QOS)}})

    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state)
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._config.get(CONF_NAME)

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def code_format(self):
        """Return one or more digits/characters."""
        code = self._config.get(CONF_CODE)
        if code is None:
            return None
        # A purely numeric code selects the numeric keypad in the frontend.
        if isinstance(code, str) and re.search('^\\d+$', code):
            return alarm.FORMAT_NUMBER
        return alarm.FORMAT_TEXT

    async def async_alarm_disarm(self, code=None):
        """Send disarm command.

        This method is a coroutine.
        """
        # Disarming always requires the configured code (when one is set).
        if not self._validate_code(code, 'disarming'):
            return
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_DISARM),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))

    async def async_alarm_arm_home(self, code=None):
        """Send arm home command.

        This method is a coroutine.
        """
        # Arming only checks the code when code_arm_required is enabled.
        code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
        if code_required and not self._validate_code(code, 'arming home'):
            return
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_ARM_HOME),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))

    async def async_alarm_arm_away(self, code=None):
        """Send arm away command.

        This method is a coroutine.
        """
        code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
        if code_required and not self._validate_code(code, 'arming away'):
            return
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_ARM_AWAY),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))

    async def async_alarm_arm_night(self, code=None):
        """Send arm night command.

        This method is a coroutine.
        """
        code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
        if code_required and not self._validate_code(code, 'arming night'):
            return
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config.get(CONF_PAYLOAD_ARM_NIGHT),
            self._config.get(CONF_QOS),
            self._config.get(CONF_RETAIN))

    def _validate_code(self, code, state):
        """Validate given code against the configured one; warn on mismatch."""
        conf_code = self._config.get(CONF_CODE)
        # No configured code means every (including missing) code is valid.
        check = conf_code is None or code == conf_code
        if not check:
            _LOGGER.warning('Wrong code entered for %s', state)
        return check
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Alex Grigorevskiy
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
The standard periodic kernel which mentioned in:
[1] Gaussian Processes for Machine Learning, C. E. Rasmussen, C. K. I. Williams.
The MIT Press, 2005.
[2] Introduction to Gaussian processes. D. J. C. MacKay. In C. M. Bishop, editor,
Neural Networks and Machine Learning, pages 133-165. Springer, 1998.
"""
from .kern import Kern
from ...core.parameterization import Param
from paramz.transformations import Logexp
import numpy as np
class StdPeriodic(Kern):
"""
Standard periodic kernel

.. math::

   k(x,y) = \theta_1 \exp \left[ - \frac{1}{2} \sum_{i=1}^{input\_dim}
   \left( \frac{\sin(\frac{\pi}{T_i} (x_i - y_i) )}{l_i} \right)^2 \right]
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance :math:`\theta_1` in the formula above
:type variance: float
:param period: the vector of periods :math:`\T_i`. If None then 1.0 is assumed.
:type period: array or list of the appropriate size (or float if there is only one period parameter)
:param lengthscale: the vector of lengthscale :math:`\l_i`. If None then 1.0 is assumed.
:type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
:param ARD1: Auto Relevance Determination with respect to period.
If equal to "False" one single period parameter :math:`\T_i` for
each dimension is assumed, otherwise there is one lengthscale
parameter per dimension.
:type ARD1: Boolean
:param ARD2: Auto Relevance Determination with respect to lengthscale.
If equal to "False" one single lengthscale parameter :math:`l_i` for
each dimension is assumed, otherwise there is one lengthscale
parameter per dimension.
:type ARD2: Boolean
:param active_dims: indices of dimensions which are used in the computation of the kernel
:type active_dims: array or list of the appropriate size
:param name: Name of the kernel for output
:type String
:param useGPU: whether of not use GPU
:type Boolean
"""
def __init__(self, input_dim, variance=1., period=None, lengthscale=None, ARD1=False, ARD2=False, active_dims=None, name='std_periodic', useGPU=False):
    """Construct the kernel and register variance, period and lengthscale."""
    super(StdPeriodic, self).__init__(input_dim, active_dims, name, useGPU=useGPU)
    self.ARD1 = ARD1  # correspond to periods
    self.ARD2 = ARD2  # correspond to lengthscales
    self.name = name

    # Periods: a single shared value unless ARD1 asks for one per dimension.
    if period is None:
        period = np.ones(input_dim if self.ARD1 else 1)
    else:
        period = np.asarray(period)
        if self.ARD1:
            assert period.size == input_dim, "bad number of periods"
        else:
            assert period.size == 1, "Only one period needed for non-ARD kernel"

    # Lengthscales: same single-vs-per-dimension logic as the periods.
    if lengthscale is None:
        lengthscale = np.ones(input_dim if self.ARD2 else 1)
    else:
        lengthscale = np.asarray(lengthscale)
        if self.ARD2:
            assert lengthscale.size == input_dim, "bad number of lengthscales"
        else:
            assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"

    # Constrain all three hyperparameters positive via the Logexp transform.
    self.variance = Param('variance', variance, Logexp())
    assert self.variance.size == 1, "Variance size must be one"
    self.period = Param('period', period, Logexp())
    self.lengthscale = Param('lengthscale', lengthscale, Logexp())
    self.link_parameters(self.variance, self.period, self.lengthscale)
def to_dict(self):
    """
    Convert the object into a json serializable dictionary.

    Note: It uses the private method _save_to_input_dict of the parent.

    :return dict: json serializable dictionary containing the needed information to instantiate the object
    """
    out = super(StdPeriodic, self)._save_to_input_dict()
    out.update({
        "class": "GPy.kern.StdPeriodic",
        "variance": self.variance.values.tolist(),
        "period": self.period.values.tolist(),
        "lengthscale": self.lengthscale.values.tolist(),
        "ARD1": self.ARD1,
        "ARD2": self.ARD2,
    })
    return out
def parameters_changed(self):
    """
    This function acts as a callback for each optimization iteration.
    If one optimization step was successful and the parameters changed,
    this callback function will be called to be able to update any
    precomputations for the kernel.
    """
    # Nothing is precomputed for this kernel, so there is nothing to update.
    pass
def K(self, X, X2=None):
    """Compute the covariance matrix between X and X2."""
    if X2 is None:
        X2 = X
    # Pairwise differences per dimension, scaled to phase angles pi*d/T.
    phase = np.pi * (X[:, None, :] - X2[None, :, :]) / self.period
    scaled_sq = np.square(np.sin(phase) / self.lengthscale)
    return self.variance * np.exp(-0.5 * np.sum(scaled_sq, axis=-1))
def Kdiag(self, X):
    """Compute the diagonal of the covariance matrix associated to X."""
    # The diagonal is constant: k(x, x) = variance for every row of X.
    diag = np.empty(X.shape[0])
    diag[:] = self.variance
    return diag
def dK_dX(self, X, X2, dimX):
    """Gradient of K(X, X2) w.r.t. dimension `dimX` of the first input.

    Returns an (n1, n2) array equal to
    -K * (pi / (2 * l_d**2 * T_d)) * sin(2 * base_d)
    where base = pi * (x - x2) / T, evaluated for d = dimX.
    """
    lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
    periodinv = np.ones(X.shape[1])/(self.period)
    # dist has shape (input_dim, n1, n2): pairwise differences per dimension.
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist * periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    k = self.variance*exp_dist[None,:,:]
    # Chain rule through sin(base)**2 / l**2 produces the sin(2*base) factor.
    full = -k*np.pi/2.*np.sin(2.*base)*lengthscale2inv[:,None,None]*periodinv[:,None,None]
    return full[dimX,:,:]
def dK_dX2(self, X, X2, dimX2):
    """Gradient w.r.t. the second input; the negative of dK_dX."""
    grad = self.dK_dX(X, X2, dimX2)
    return -grad
def dK2_dXdX2(self, X, X2, dimX, dimX2):
    """Cross second derivative d^2 K / (dX[dimX] dX2[dimX2]) as (n1, n2)."""
    lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
    periodinv = np.ones(X.shape[1])/(self.period)
    period2inv = np.ones(X.shape[1])/(self.period**2)
    # Pairwise differences and phase angles, shape (input_dim, n1, n2).
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist * periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    k = self.variance*exp_dist
    dk_dx2 = self.dK_dX2( X, X2, dimX2)
    # Product-rule term: differentiate the K factor inside dK_dX2.
    ret = -dk_dx2*np.pi/2.*lengthscale2inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
    if dimX == dimX2:
        # Extra term from differentiating sin(2*base) itself (same dimension).
        ret += k*(np.pi**2)*period2inv[dimX]*lengthscale2inv[dimX]*np.cos(2.*base[dimX,:,:])
    return ret
def dK_dvariance(self, X, X2=None):
    """Gradient of K w.r.t. the variance, i.e. K / variance."""
    if X2 is None:
        X2 = X
    phase = np.pi * (X[:, None, :] - X2[None, :, :]) / self.period
    scaled = np.sin(phase) / self.lengthscale
    return np.exp(-0.5 * np.sum(np.square(scaled), axis=-1))
def dK_dlengthscale(self, X, X2=None):
    """Gradient of K w.r.t. the lengthscale(s).

    Returns an (n1, n2) array when ARD2 is off (one shared lengthscale,
    per-dimension contributions summed), otherwise an (input_dim, n1, n2)
    array with one gradient slice per lengthscale.
    """
    if X2 is None:
        X2=X
    lengthscale3inv = np.ones(X.shape[1])/(self.lengthscale**3)
    periodinv = np.ones(X.shape[1])/(self.period)
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist *periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    # d/dl of exp(-0.5*sin(base)^2/l^2) contributes sin(base)^2 / l^3.
    return self.variance*np.sum((np.sin(base))**2, axis=0)*exp_dist/(self.lengthscale**3) if not self.ARD2 else self.variance*exp_dist[None,:,:]*(np.sin(base))**2*lengthscale3inv[:,None,None]
def dK_dperiod(self, X, X2=None):
    """Gradient of K w.r.t. the period(s).

    Returns (n1, n2) when ARD1 is off (contributions summed over input
    dimensions), otherwise (input_dim, n1, n2) with one slice per period.
    """
    if X2 is None:
        X2=X
    periodinv = 1/self.period
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist *periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    # d(base)/dT = -base/T; chain rule gives sin*cos*base/(T*l^2).
    return self.variance*exp_dist*np.sum(np.sin(base)*np.cos(base)*base/self.period[:,None,None]/(self.lengthscale[:,None,None]**2), axis=0) if not self.ARD1 else self.variance*exp_dist[None,:,:]*np.sin(base)*np.cos(base)*base/self.period[:,None,None]/(self.lengthscale[:,None,None]**2);
def dK2_dvariancedX(self, X, X2, dimX):
    """Mixed derivative d^2 K / (d variance dX[dimX]).

    K is linear in the variance, so this equals dK_dX / variance.
    """
    lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
    periodinv = np.ones(X.shape[1])/(self.period)
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist * periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    # Same expression as dK_dX but without the leading variance factor.
    ret = -exp_dist*np.pi/2.*np.sin(2.*base)*lengthscale2inv[:,None,None]*periodinv[:,None,None]
    return ret[dimX,:,:]
def dK2_dlengthscaledX(self, X, X2, dimX):
    """Mixed derivative d^2 K / (d lengthscale dX[dimX]).

    Non-ARD: returns an array matching the shared-lengthscale layout.
    ARD: returns (input_dim, n1, n2), one slice per lengthscale; only the
    dimX slice receives the direct (non-cross) term.
    """
    lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
    lengthscale3inv = np.ones(X.shape[1])/(self.lengthscale**3)
    periodinv = np.ones(X.shape[1])/(self.period)
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist *periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    if not self.ARD2:
        ret = np.pi*self.variance*lengthscale3inv[dimX]*periodinv[dimX]*np.sin(2.*base)*exp_dist*(1 - 0.5*self.lengthscale*np.sum(lengthscale3inv[dimX]*np.sin(base)**2, axis=0))
    else:
        # Cross terms for every lengthscale, plus the direct term on dimX.
        tmp = np.pi*self.variance*lengthscale3inv[:,None,None]*periodinv[dimX]*np.sin(2.*base[dimX, :, :])*exp_dist
        ret = -0.5*(np.sin(base)**2)*lengthscale2inv[dimX]*tmp
        ret[dimX,:,:] += tmp[dimX,:,:]
    return ret
def dK2_dperioddX(self, X, X2, dimX):
    """Mixed derivative d^2 K / (d period dX[dimX]).

    ARD1: returns (input_dim, n1, n2), one slice per period, with the
    direct term added on the dimX slice. Non-ARD: returns the (n1, n2)
    slice for dimX of the combined expression.
    """
    lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
    periodinv = np.ones(X.shape[1])/(self.period)
    period2inv = np.ones(X.shape[1])/(self.period**2)
    period3inv = np.ones(X.shape[1])/(self.period**3)
    dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
    base = np.pi * dist *periodinv[:,None,None]
    exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
    k = self.K(X,X2)
    dk_dperiod = self.dK_dperiod(X,X2)
    if self.ARD1:
        # Cross term through dK_dperiod, then the direct dimX-only term.
        ret = -dk_dperiod*np.pi*np.sin(2.*base[dimX,:,:])*lengthscale2inv[dimX]*periodinv[dimX]/2.
        ret[dimX,:,:] += k*np.pi*lengthscale2inv[dimX]*period2inv[dimX]*(np.pi*np.cos(2.*base[dimX,:,:])*dist[dimX,:,:]*periodinv[dimX] + np.sin(2.*base[dimX,:,:])/2.)
        return ret
    else:
        ret = self.variance*exp_dist[None,:,:]*np.pi*lengthscale2inv[:,None,None]*(np.pi*period3inv[:,None,None]*np.cos(2.*base)*dist - 0.25*np.pi*period3inv[:,None,None]*np.sin(2.*base)*np.sum(dist*np.sin(2.*base)*lengthscale2inv[:,None,None], axis=0) +0.5*np.sin(2.*base)*period2inv[:,None,None])
        return ret[dimX,:,:]
def dK2_dvariancedX2(self, X, X2, dimX2):
return -self.dK2_dvariancedX(X, X2, dimX2)
def dK2_dlengthscaledX2(self, X, X2, dimX2):
return -self.dK2_dlengthscaledX(X, X2, dimX2)
def dK2_dperioddX2(self, X, X2, dimX2):
return -self.dK2_dperioddX(X, X2, dimX2)
def dK3_dvariancedXdX2(self, X, X2, dimX, dimX2):
lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
periodinv = np.ones(X.shape[1])/(self.period)
period2inv = np.ones(X.shape[1])/(self.period**2)
dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
base = np.pi * dist * periodinv[:,None,None]
exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
I = np.eye((X.shape[1]))
dk2_dvariancedx2 = self.dK2_dvariancedX2(X, X2, dimX2)
dk_dvariance = self.dK_dvariance(X, X2)
ret = -dk2_dvariancedx2*np.pi/2.*lengthscale2inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
ret[dimX,dimX2] += (dk_dvariance*(np.pi**2)*period2inv[dimX]*lengthscale2inv[dimX]*np.cos(2.*base[dimX,:,:]))[dimX, dimX2]
return ret
def dK3_dlengthscaledXdX2(self, X, X2, dimX, dimX2):
lengthscaleinv = np.ones(X.shape[1])/(self.lengthscale)
lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
lengthscale3inv = np.ones(X.shape[1])/(self.lengthscale**3)
lengthscale4inv = np.ones(X.shape[1])/(self.lengthscale**4)
lengthscale5inv = np.ones(X.shape[1])/(self.lengthscale**5)
periodinv = np.ones(X.shape[1])/(self.period)
period2inv = np.ones(X.shape[1])/(self.period**2)
dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
base = np.pi * dist * periodinv[:,None,None]
exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
k = self.variance*exp_dist;
dk2_dlengthgthscaledx2 = self.dK2_dlengthscaledX2(X, X2, dimX2)
dk_dx2 = self.dK_dX2(X,X2,dimX2)
dk_dlengthscale = self.dK_dlengthscale(X, X2)
I = np.eye((X.shape[1]))
if self.ARD2:
tmp1 =-dk2_dlengthgthscaledx2*np.pi/2.*lengthscale2inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
tmp2 = np.zeros_like(tmp1)
tmp3 = np.zeros_like(tmp1)
tmp4 = np.zeros_like(tmp1)
# i = j
tmp2 = np.zeros_like(tmp1)
tmp2[dimX,:,:] = dk_dx2*np.pi*lengthscale3inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
if dimX == dimX2: # j = k
tmp3 = dk_dlengthscale*(np.pi**2)*lengthscale2inv[dimX]*period2inv[dimX]*np.cos(2.*base[dimX,:,:])
# i = j = k
tmp4 = np.zeros_like(tmp1)
tmp4[dimX,:,:] = -2.*k[:,:]*(np.pi**2)*lengthscale3inv[dimX]*period2inv[dimX]*np.cos(2.*base[dimX,:,:])
return tmp1+tmp2+tmp3+tmp4
else:
tmp1 = -dk2_dlengthgthscaledx2[dimX2,:,:]*np.pi/2.*lengthscale2inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:]) + dk_dx2*np.pi*lengthscale3inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
tmp2 = np.zeros_like(tmp1)
if dimX == dimX2:
tmp2[:,:] = (dk_dlengthscale*(np.pi**2)*lengthscale2inv[dimX]*period2inv[dimX]*np.cos(2.*base[dimX]) -2.*k*(np.pi**2)*lengthscale3inv[dimX]*period2inv[dimX]*np.cos(2.*base[dimX,:,:]))
return tmp1+tmp2
def dK3_dperioddXdX2(self, X, X2, dimX, dimX2):
lengthscaleinv = np.ones(X.shape[1])/(self.lengthscale)
lengthscale2inv = np.ones(X.shape[1])/(self.lengthscale**2)
lengthscale3inv = np.ones(X.shape[1])/(self.lengthscale**3)
lengthscale4inv = np.ones(X.shape[1])/(self.lengthscale**4)
lengthscale5inv = np.ones(X.shape[1])/(self.lengthscale**5)
periodinv = np.ones(X.shape[1])/(self.period)
period2inv = np.ones(X.shape[1])/(self.period**2)
period3inv = np.ones(X.shape[1])/(self.period**3)
period4inv = np.ones(X.shape[1])/(self.period**4)
dist = np.rollaxis(X[:, None, :] - X2[None, :, :],2,0)
base = np.pi * dist * periodinv[:,None,None]
exp_dist = np.exp( -0.5* np.sum( np.square( np.sin( base ) / self.lengthscale[:,None,None] ), axis = 0 ) )
k = self.variance*exp_dist
dk2_dperioddx2 = self.dK2_dperioddX2(X, X2, dimX2)
dk_dx2 = self.dK_dX2(X, X2, dimX2)
dk_dperiod = self.dK_dperiod(X, X2)
if self.ARD1:
tmp1 = -dk2_dperioddx2*np.pi/2.*lengthscale2inv[dimX]*periodinv[dimX]*np.sin(2.*base[dimX,:,:])
tmp2 = np.zeros_like(tmp1)
tmp3 = np.zeros_like(tmp1)
tmp4 = np.zeros_like(tmp1)
# i = j
tmp2[dimX,:,:] = dk_dx2[:,:]*(np.pi*lengthscale2inv[dimX]*period2inv[dimX]*np.sin(2.*base[dimX,:,:])/2. + (np.pi**2)*lengthscale2inv[dimX]*period3inv[dimX]*np.cos(2.*base[dimX,:,:])*dist[dimX,:,:])
if dimX == dimX2: # j = k
tmp3 = dk_dperiod[:,:,:]*(np.pi**2)*period2inv[dimX]*lengthscale2inv[dimX]*np.cos(2.*base[dimX,:,:])
# i = j = k
tmp4[dimX,:,:] = -2.*k*(np.pi**2)*lengthscale2inv[dimX]*(period3inv[dimX]*np.cos(2.*base[dimX,:,:])-np.pi*period4inv[dimX]*np.sin(2.*base[dimX,:,:])*dist[dimX,:,:])
return tmp1+tmp2+tmp3+tmp4
else:
tmp1 = np.pi*lengthscale2inv[dimX]/2.*(-dk2_dperioddx2*periodinv[dimX]*np.sin(2.*base[dimX,:,:]) + dk_dx2*period2inv[dimX]*np.sin(2.*base[dimX,:,:]) + np.pi*dk_dx2*2.*period3inv[dimX]*np.cos(2.*base[dimX,:,:])*dist[dimX,:,:] )
tmp2 = np.zeros_like(tmp1)
if dimX == dimX2:
tmp2 = (np.pi**2)*lengthscale2inv[dimX]*period2inv[dimX]*(dk_dperiod*np.cos(2.*base[dimX,:,:]) -2.*k*periodinv[dimX]*np.cos(2.*base[dimX,:,:]) +2.*k*np.sin(2.*base[dimX,:,:])*np.pi*period2inv[dimX]*dist[dimX,:,:] )
return tmp1+tmp2
    def update_gradients_full(self, dL_dK, X, X2=None):
        """derivative of the covariance matrix with respect to the parameters.

        Writes dL/dtheta = sum_ij dL_dK[i,j] * dK[i,j]/dtheta into the
        ``gradient`` attribute of variance, period and lengthscale.
        """
        if X2 is None:
            X2 = X
        # base[i, j, d] = pi * (X[i, d] - X2[j, d]) / period[d]
        base = np.pi * (X[:, None, :] - X2[None, :, :]) / self.period
        sin_base = np.sin( base )
        exp_dist = np.exp( -0.5* np.sum( np.square( sin_base / self.lengthscale ), axis = -1 ) )
        # per-dimension inner derivatives of the exponent w.r.t. period and
        # lengthscale (still need the exp_dist factor applied below)
        dwl = self.variance * (1.0/np.square(self.lengthscale)) * sin_base*np.cos(base) * (base / self.period)
        dl = self.variance * np.square( sin_base) / np.power( self.lengthscale, 3)
        self.variance.gradient = np.sum(exp_dist * dL_dK)
        #target[0] += np.sum( exp_dist * dL_dK)
        if self.ARD1: # different periods
            self.period.gradient = (dwl * exp_dist[:,:,None] * dL_dK[:, :, None]).sum(0).sum(0)
        else: # same period
            self.period.gradient = np.sum(dwl.sum(-1) * exp_dist * dL_dK)
        if self.ARD2: # different lengthscales
            self.lengthscale.gradient = (dl * exp_dist[:,:,None] * dL_dK[:, :, None]).sum(0).sum(0)
        else: # same lengthscales
            self.lengthscale.gradient = np.sum(dl.sum(-1) * exp_dist * dL_dK)
def update_gradients_direct(self, dL_dVar, dL_dPer, dL_dLen):
self.variance.gradient = dL_dVar
self.period.gradient = dL_dPer
self.lengthscale.gradient = dL_dLen
def reset_gradients(self):
self.variance.gradient = 0.
if not self.ARD1:
self.period.gradient = 0.
else:
self.period.gradient = np.zeros(self.input_dim)
if not self.ARD2:
self.lengthscale.gradient = 0.
else:
self.lengthscale.gradient = np.zeros(self.input_dim)
def update_gradients_diag(self, dL_dKdiag, X):
"""derivative of the diagonal of the covariance matrix with respect to the parameters."""
self.variance.gradient = np.sum(dL_dKdiag)
self.period.gradient = 0
self.lengthscale.gradient = 0
def dgradients(self, X, X2):
g1 = self.dK_dvariance(X, X2)
g2 = self.dK_dperiod(X, X2)
g3 = self.dK_dlengthscale(X, X2)
return [g1, g2, g3]
def dgradients_dX(self, X, X2, dimX):
g1 = self.dK2_dvariancedX(X, X2, dimX)
g2 = self.dK2_dperioddX(X, X2, dimX)
g3 = self.dK2_dlengthscaledX(X, X2, dimX)
return [g1, g2, g3]
def dgradients_dX2(self, X, X2, dimX2):
g1 = self.dK2_dvariancedX2(X, X2, dimX2)
g2 = self.dK2_dperioddX2(X, X2, dimX2)
g3 = self.dK2_dlengthscaledX2(X, X2, dimX2)
return [g1, g2, g3]
def dgradients2_dXdX2(self, X, X2, dimX, dimX2):
g1 = self.dK3_dvariancedXdX2(X, X2, dimX, dimX2)
g2 = self.dK3_dperioddXdX2(X, X2, dimX, dimX2)
g3 = self.dK3_dlengthscaledXdX2(X, X2, dimX, dimX2)
return [g1, g2, g3]
    def gradients_X(self, dL_dK, X, X2=None):
        """Gradient of the objective w.r.t. the inputs X.

        :param dL_dK: gradient of the objective w.r.t. the covariance matrix
        :param X: first input array (n1 x input_dim)
        :param X2: second input array, or None to use X (symmetric case)
        :return: array with the same shape as X
        """
        K = self.K(X, X2)
        if X2 is None:
            # symmetric case: X appears on both sides of K, so folding in
            # the transpose accounts for both occurrences
            dL_dK = dL_dK+dL_dK.T
            X2 = X
        dX = -np.pi*((dL_dK*K)[:,:,None]*np.sin(2*np.pi/self.period*(X[:,None,:] - X2[None,:,:]))/(2.*np.square(self.lengthscale)*self.period)).sum(1)
        return dX
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def input_sensitivity(self, summarize=True):
return self.variance*np.ones(self.input_dim)/self.lengthscale**2
|
|
# -*- coding: utf-8 -*-
import os
import logging
from itertools import chain
import numpy as np
import pandas as pd
from anacode import codes
from anacode.api import writers
from anacode.api.writers import CSV_FILES
from anacode.agg import plotting
from nltk.text import TextCollection, Text
def _capitalize(string):
    """Join an underscore_separated name into CamelCase, e.g.
    'concept_type' -> 'ConceptType'."""
    return ''.join(part.capitalize() for part in string.split('_'))
class ApiCallDataset(object):
    """Common base type for the per-API-call dataset containers below."""
    pass
class NoRelevantData(Exception):
    """Raised when an :class:`anacode.agg.aggregations.ApiCallDataset` lacks
    the data required to perform the requested aggregation.
    """
    pass
class ConceptsDataset(ApiCallDataset):
    """Concept dataset container with easy aggregation capabilities.

    Fixes applied:
    * ``Series.iteritems()`` (removed in pandas 2.0) replaced with the
      equivalent ``Series.items()``.
    * ``_plot_id`` is attached after the ``normalize`` division in
      ``most_common_concepts`` / ``least_common_concepts``; previously the
      division created a new Series and the attribute was lost.
    """
    def __init__(self, concepts, surface_strings):
        """Initialize instance by providing the two dataframes required for concepts representation.
        :param concepts: List of found concepts with metadata
        :type concepts: pandas.DataFrame
        :param surface_strings: List of strings realizing found concepts
        :type surface_strings: pandas.DataFrame
        """
        self._concepts = concepts
        self._surface_strings = surface_strings
        # Cache the set of valid concept-type filter strings; '' is always
        # allowed and means "no type filter".
        if self._concepts is not None and 'concept_type' in self._concepts:
            self._concept_filter = set(self._concepts.concept_type.unique())
        else:
            self._concept_filter = set()
        self._concept_filter.add('')
    def concept_frequency(self, concept, concept_type='', normalize=False):
        """Return occurrence count of input concept or concept list. Resulting
        list has concepts sorted just like they were in input if it was list or
        tuple. Concepts that are not of *concept_type* or that are not in the
        dataset will always have zero count. Setting normalize will turn
        absolute counts into relative percentages.
        Specifying *concept_type* is intended to be used only with
        normalization. If used without *normalize* set it will have no effect
        except for concepts that do not have said type whose count will be zero.
        When used with *normalize* set percentages will reflect counts only
        within specified *concept_type* instead of the whole dataset.
        :param concept: name(s) of concept to count occurrences for
        :type concept: list, tuple, set or string
        :param concept_type: Limit result concepts counts only to concepts
         with this type
        :type concept_type: str
        :param normalize: Returns relative counts of concepts in specified
         concept type if set, otherwise returns absolute counts
        :type normalize: bool
        :return: pandas.Series -- Concept names as index and their counts as
         values sorted as they were in input.
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        if not isinstance(concept, (tuple, list, set)):
            concept = {concept}
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        counts = con[con.concept.isin(concept)].groupby('concept')['freq'].sum()
        # Preserve the caller's ordering for sequences; sets get list order.
        if isinstance(concept, (tuple, list)):
            counts = counts.reindex(concept)
        elif isinstance(concept, set):
            counts = counts.reindex(list(concept))
        else:
            # unreachable: concept was normalized to a set above
            counts = counts.reindex(concept)
        result = counts.rename('Count').replace(np.nan, 0)
        result.index.name = _capitalize(concept_type) or 'Concept'
        if normalize:
            # ratio within the (possibly type-filtered) selection
            size = con.freq.sum()
            result = result.astype(float) / size
        else:
            result = result.astype(int)
        result._plot_id = codes.CONCEPT_FREQUENCY
        return result
    def most_common_concepts(self, n=15, concept_type='', normalize=False):
        """Counts concepts and returns n most occurring ones sorted by their
        count descending. Counted concepts can be filtered by their type using
        *concept_type* and returned counts can be normalized with *normalize*.
        If both *concept_type* and *normalize* are specified concept ratios
        will be computed only from concept counts within given *concept_type*.
        :param n: Maximum number of most common concepts to return
        :type n: int
        :param concept_type: Limit concept counts only to concepts whose type
         starts with this string
        :type concept_type: str
        :param normalize: Returns relative frequencies if normalize is True
        :type normalize: bool
        :return: pandas.Series -- Concept names as index and their counts as
         values sorted descending
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        con_counts = con.groupby('concept').agg({'freq': 'sum'}).freq
        result = con_counts.rename('Count').sort_values(ascending=False)[:n]
        result.index.name = _capitalize(concept_type) or 'Concept'
        if normalize:
            result = result.astype(float) / con.freq.sum()
        # set after the normalize division so the attribute survives
        result._plot_id = codes.MOST_COMMON_CONCEPTS
        return result
    def least_common_concepts(self, n=15, concept_type='', normalize=False):
        """Counts concepts and returns n least frequent ones sorted by their
        count ascending. Counted concepts can be filtered by their type using
        *concept_type* and returned counts can be normalized with *normalize*.
        If both *concept_type* and *normalize* are specified, concept ratios
        will be computed only from concept counts within given *concept_type*.
        :param n: Maximum number of concepts to return
        :type n: int
        :param concept_type: Limit concept counts only to concepts whose type
         starts with this string
        :type concept_type: str
        :param normalize: Returns relative frequencies if normalize is True
        :type normalize: bool
        :return: pandas.Series -- Concept names as index and their counts as
         values sorted ascending
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        con_counts = con.groupby('concept').agg({'freq': 'sum'}).freq
        result = con_counts.rename('Count').sort_values()[:n]
        result.index.name = _capitalize(concept_type) or 'Concept'
        if normalize:
            result = result.astype(float) / con.freq.sum()
        # set after the normalize division so the attribute survives
        result._plot_id = codes.LEAST_COMMON_CONCEPTS
        return result
    def co_occurring_concepts(self, concept, n=15, concept_type=''):
        """Find *n* concepts co-occurring frequently in texts of this dataset
        with given *concept*, sorted by descending frequency. Co-occurring
        concepts can be filtered by their type.
        :param concept: Concept to inspect for co-occurring concepts
        :type concept: str
        :param n: Maximum number of returned co-occurring concepts
        :type n: int
        :param concept_type: Limit co-occurring concept counts only to this
         type of concepts.
        :type concept_type: str
        :return: pandas.Series -- Co-occurring concept names as index and
         their frequencies sorted by descending frequency
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        # match the concept case-insensitively
        identity_filter = con.concept.str.lower() == concept.lower()
        relevant_texts = con[identity_filter][['doc_id', 'text_order']]
        relevant_texts = relevant_texts.set_index(['doc_id', 'text_order'])
        if relevant_texts.shape[0] != 0:
            if concept_type:
                type_filter = con.concept_type == concept_type
            else:
                type_filter = True
            # drop the inspected concept itself, then keep only rows from
            # texts that mention it
            con = con[type_filter & (identity_filter == False)]
            con = relevant_texts.join(con.set_index(['doc_id', 'text_order']))
            con_counts = con.groupby('concept').agg({'freq': 'sum'}).freq
            con_counts = con_counts.rename('Count').sort_values(ascending=False)
            result = con_counts[:n].astype(int)
        else:
            result = pd.Series([]).rename('Count')
        result.index.name = _capitalize(concept_type) or 'Concept'
        result._concept = concept
        result._plot_id = codes.CO_OCCURING_CONCEPTS
        return result
    def nltk_textcollection(self, concept_type=''):
        """Wraps concepts of each represented documents into nltk.text.Text
        and returns these wrapped in nltk.text.TextCollection.
        :param concept_type: Limit gathered concepts only to this type of
         concepts
        :type concept_type: str
        :return: nltk.text.TextCollection -- TextCollection of represented
         documents
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        texts = []
        docs_concepts = con.groupby(['doc_id', 'concept'])['freq'].sum()
        docs_concepts = docs_concepts.reset_index()
        for doc_id in self._concepts.doc_id.unique():
            concepts = docs_concepts[docs_concepts.doc_id == doc_id]
            concepts = concepts.set_index('concept')['freq']
            # Series.iteritems was removed in pandas 2.0; items() is the
            # long-standing equivalent.
            doc = chain.from_iterable([w] * c for w, c in concepts.items())
            texts.append(Text(doc))
        return TextCollection(texts)
    def make_idf_filter(self, threshold, concept_type=''):
        """Generates concept filter based on idf values of concepts in
        represented documents. This filter can be directly used as parameter
        for concept_cloud call.
        :param threshold: Minimum IDF of concept that will pass the filter
        :type threshold: float
        :param concept_type: Limit co-occurring concept counts only to this
         type of concepts.
        :type concept_type: str
        :return: callable -- Function that can be used as idf_func in
         concept_cloud
        """
        corpus = self.nltk_textcollection(concept_type)
        def idf_filter(concept):
            """Decide whether *concept*'s IDF in the corpus reaches the
            threshold captured by the enclosing call.
            :param concept: Concept name for which to retrieve IDF
            :type concept: str
            :return: bool -- True if concept is relevant, else False
            """
            return corpus.idf(concept) >= threshold
        return idf_filter
    def make_time_series(self, concepts, date_info, delta, interval=None):
        """Creates DataFrame with counts for each *concepts* in every *delta*
        time tick that exists in *interval*. If you do not specify interval it
        will be computed from date_info to include all documents.
        In concepts dataset there is no information about document release date
        so you will have to provide this information externally as *date_info*.
        It needs to be a map object that has all document ids from concept's
        dataset as keys and they refer to datetime.date representing release
        date for the document.
        Result will include 0 counts for ticks where concepts were not
        mentioned. In each row there will also be start and stop times for that
        particular count. Counts from stop time are not included in the tick.
        :param concepts: List of concept names to make time series for
        :type concepts: list
        :param date_info: Keys need to be document ids in this dataset and
         values datetime.datetime or datetime.date objects
        :type date_info: dict
        :param delta: Time series tick size
        :param interval: (start, stop) where both values are datetimes or dates
        :type interval: tuple
        :return: pandas.DataFrame -- DataFrame with columns "Concept", "Count",
         "Start" and "Stop"
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if interval is None:
            interval = min(date_info.values()), max(date_info.values()) + delta
        tick_counts, ticks = [], []
        con = self._concepts
        con = con[con.concept.isin(concepts)]
        dates = pd.Series([date_info[doc_id] for doc_id in con.doc_id])
        last, stop = interval
        current = last + delta
        # walk the interval one delta at a time, counting mentions per tick;
        # ticks are half-open: [last, current)
        while last < stop:
            relevant = con[((dates >= last) & (dates < current)).tolist()]
            counts = relevant.groupby('concept').agg({'freq': 'sum'})['freq']
            concept_counts = [counts.get(c, 0) for c in concepts]
            tick_counts.append(concept_counts)
            ticks.append((last, current))
            last = current
            current = current + delta
        concept_frames = []
        # transpose: per-tick rows -> per-concept columns
        concept_count_lists = list(zip(*tick_counts))
        ticks = list(zip(*ticks))
        for concept, counts in zip(concepts, concept_count_lists):
            count_df = pd.Series(counts, name='Count').reset_index()
            count_df['Concept'] = concept
            count_df['Start'] = ticks[0]
            count_df['Stop'] = ticks[1]
            concept_frames.append(count_df)
        retval = pd.concat(concept_frames)
        retval.reset_index(drop=True, inplace=True)
        # drop the positional 'index' column introduced by reset_index above
        retval.drop('index', axis=1, inplace=True)
        return retval
    def concept_frequencies(self, max_concepts=200, concept_type='',
                            concept_filter=None):
        """Returns pandas series with counts for all concepts from the dataset.
        To filter words that will be showed in the cloud you can use
        *concept_type* and *concept_filter*. The former is specific type of
        concepts that you only want to have present in the result and
        the latter is callable that takes concept name and returns bool
        to indicate whether given concept should pass the filter. You can set
        both at the same time. *concept_type* is applied first,
        *concept_filter* second.
        :param max_concepts: Maximum number of concepts that will be plotted
        :type max_concepts: int
        :param concept_type: Limit concepts only to concepts whose type starts
         with this string
        :type concept_type: str
        :param concept_filter: If not None given callable needs to accept one
         string parameter that is concept name and evaluate it if it should
         pass the filter - callable returns True - or not - callable returns
         False. Only concepts that pass can be seen on resulting concept cloud
         image
        :type concept_filter: callable
        :return: pandas.Series -- Concept names as index and their counts as
         values
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        if concept_filter is not None:
            con = con[list(map(concept_filter, con.concept))]
        data = con.groupby('concept')['freq'].sum()
        frequencies = data.sort_values().tail(max_concepts).reset_index()
        frequencies._plot_id = codes.CONCEPT_CLOUD
        frequencies.index.name = _capitalize(concept_type) or 'Concept'
        return frequencies
    def frequency_relevance(self, concepts=None, n=15, concept_type=''):
        """Return total frequency and mean relevance score per concept.

        When *concepts* is None the *n* most relevant concepts are returned
        sorted by relevance descending; when it is a list or tuple the result
        is reindexed to match its order.

        :param concepts: Optional concept names to restrict the result to
        :type concepts: list, tuple, set or None
        :param n: Maximum number of rows when *concepts* is None
        :type n: int
        :param concept_type: Limit result only to concepts with this type
        :type concept_type: str
        :return: pandas.DataFrame -- 'Frequency' and 'Relevance' columns
         indexed by concept name
        """
        if self._concepts is None:
            raise NoRelevantData('Relevant concept data is not available!')
        if concept_type not in self._concept_filter:
            msg = '"{}" not valid filter string'.format(concept_type)
            raise ValueError(msg)
        con = self._concepts
        if concept_type:
            con = con[con.concept_type == concept_type]
        if concepts:
            con = con[con.concept.isin(set(concepts))]
        agg = {'freq': 'sum', 'relevance_score': 'mean'}
        result = con.groupby('concept').agg(agg)
        if not concepts:
            result.sort_values('relevance_score', ascending=False, inplace=True)
            result = result.head(n)
        elif isinstance(concepts, (list, tuple)):
            result = result.reindex(concepts)
        result.rename(inplace=True, columns={'relevance_score': 'Relevance',
                                             'freq': 'Frequency'})
        result.index.name = _capitalize(concept_type) or 'Concept'
        # reindex may have introduced NaN rows for unknown concepts; drop them
        result = result[result['Relevance'].isnull() == False]
        result._plot_id = codes.FREQUENCY_RELEVANCE
        return result
    def surface_forms(self, concept, n=15):
        """Find `n` random surface strings from analyzed text that were
        identified as `concept`.
        :param concept: Inspect this concept surface forms.
        :param n: Maximum number of unique surface forms returned
        :return: set -- Set with maximum of `n` surface forms of `concept`
        """
        if self._surface_strings is None:
            raise NoRelevantData('Relevant surface data is not available!')
        data = self._surface_strings
        # case-insensitive match against the concept name
        data = data[data.concept.str.lower() == concept.lower()]
        surface_forms = list(data.surface_string.unique())
        return set(surface_forms[:n])
class CategoriesDataset(ApiCallDataset):
    """Container for per-document category probabilities with aggregation
    helpers.
    """
    def __init__(self, categories):
        """Store the categories frame.

        :param categories: List of document category probabilities
        :type categories: pandas.DataFrame
        """
        self._categories = categories
    def categories(self):
        """Mean probability per category across the whole dataset, sorted
        descending.

        :return: pandas.Series -- category names as index, mean probabilities
         as values
        """
        if self._categories is None:
            raise NoRelevantData('Relevant category data is not available!')
        means = (self._categories
                 .groupby('category')['probability']
                 .mean()
                 .sort_values(ascending=False)
                 .rename('Probability'))
        means._plot_id = codes.AGGREGATED_CATEGORIES
        means.index.name = 'Category'
        return means
    def main_category(self):
        """Name of the category with the highest mean probability.

        :return: str -- Name of main category.
        """
        if self._categories is None:
            raise NoRelevantData('Relevant category data is not available!')
        probs = self._categories.groupby('category')['probability'].mean()
        return probs.sort_values(ascending=False).index[0]
class SentimentDataset(ApiCallDataset):
    """Container for document sentiment values with aggregation helpers."""
    def __init__(self, sentiments):
        """Store the sentiments frame.

        :param sentiments: List of document sentiment inclinations
        :type sentiments: pandas.DataFrame
        """
        self._sentiments = sentiments
    def average_sentiment(self):
        """Mean document sentiment, a number in [-1, 1] where higher means
        more positive.

        :return: float -- Average document sentiment
        """
        if self._sentiments is None:
            raise NoRelevantData('Relevant sentiment data is not available!')
        return self._sentiments.sentiment_value.mean()
class ABSADataset(ApiCallDataset):
"""ABSA data set container that will provides easy aggregation
capabilities.
"""
    def __init__(self, entities, normalized_texts,
                 relations, relations_entities,
                 evaluations, evaluations_entities):
        """Initialize instance by providing all absa data sets.
        :param entities: List of entities used in texts
        :type entities: pandas.DataFrame
        :param normalized_texts: List of chinese normalized texts
        :type normalized_texts: pandas.DataFrame
        :param relations: List of relations with metadata
        :type relations: pandas.DataFrame
        :param relations_entities: List of entities used in relations
        :type relations_entities: pandas.DataFrame
        :param evaluations: List of entity evaluations
        :type evaluations: pandas.DataFrame
        :param evaluations_entities: List of entities used in evaluations
        :type evaluations_entities: pandas.DataFrame
        """
        # Any of these frames may be None; the aggregation methods check for
        # None and raise NoRelevantData before using them.
        self._entities = entities
        self._normalized_texts = normalized_texts
        self._relations = relations
        self._relations_entities = relations_entities
        self._evaluations = evaluations
        self._evaluations_entities = evaluations_entities
    def entity_frequency(self, entity, entity_type='', normalize=False):
        """Return occurrence count of input entity or entity list. Resulting
        list has entities sorted just like they were in input if it was list or
        tuple. Entities whose entity_type does not start with given one or that
        are not in the dataset will always have zero count. Setting normalize
        will turn absolute counts into relative percentages.
        Specifying *entity_type* is intended to be used only with
        normalization. If used without *normalize* set it will have no effect
        except for possible before mentioned zeroing. When used with *normalize*
        set result percentages will reflect counts only within specified
        *entity_type* instead of the whole dataset.
        :param entity: Entity name or tuple/list/set of entity names
        :type entity: tuple, list, set or str
        :param entity_type: Optional filter for entity type to consider
        :type entity_type: str
        :param normalize: Returns relative frequencies if normalize is True
        :type normalize: bool
        :return: pandas.Series -- Entity names as index entity frequencies as
         values sorted as input if it was tuple or list
        """
        if self._entities is None:
            raise NoRelevantData('Relevant entities data is not available!')
        # normalize a single name to a one-element set
        if not isinstance(entity, (tuple, list, set)):
            entity = {entity}
        ents = self._entities
        ents = ents[ents.entity_type.str.startswith(entity_type)]
        # normalize=True makes value_counts return ratios within the
        # filtered selection (pandas semantics)
        counts = ents['entity_name'].value_counts(normalize=normalize)
        # preserve the caller's ordering for sequences; sets get list order
        if isinstance(entity, (tuple, list)):
            counts = counts.reindex(entity)
        elif isinstance(entity, set):
            counts = counts.reindex(list(entity))
        else:
            # unreachable: entity was normalized to a set above
            counts = counts.reindex(entity)
        result = counts.rename('Count').replace(np.nan, 0)
        result.index.name = _capitalize(entity_type) or 'Entity'
        if not normalize:
            result = result.astype(int)
        result._plot_id = codes.ENTITY_FREQUENCY
        return result
def most_common_entities(self, n=15, entity_type='', normalize=False):
"""Counts entities and returns n most occurring ones sorted by their
count descending. Counted entities can be filtered by their type using
*entity_type* and returned counts can be normalized with *normalize*.
If both *entity_type* and *normalize* are specified entity ratios
will be computed only from entity counts within given *entity_type*.
:param n: Maximum number of most common entities to return
:type n: int
:param entity_type: Limit entities counts only to entities whose type
starts with this string
:type entity_type: str
:param normalize: Returns relative frequencies if normalize is True
:type normalize: bool
:return: pandas.Series -- Entity names as index and their counts as
values sorted descending
"""
if self._entities is None:
raise NoRelevantData('Relevant entity data is not available!')
ent = self._entities
ent = ent[ent.entity_type.str.startswith(entity_type)]
result = ent['entity_name'].value_counts(normalize=normalize)[:n]
result = result.rename('Count')
result._plot_id = codes.MOST_COMMON_ENTITIES
result.index.name = _capitalize(entity_type) or 'Entity'
return result
def least_common_entities(self, n=15, entity_type='', normalize=False):
"""Counts entities and returns n least frequent ones sorted by their
count ascending. Counted entities can be filtered by their type using
*entity_type* and returned counts can be normalized with *normalize*.
If both *entity_type* and *normalize* are specified entity ratios
will be computed only from entity counts within given *entity_type*.
:param n: Maximum number of least frequent entities to return
:type n: int
:param entity_type: Limit entities counts only to entities whose type
starts with this string
:type entity_type: str
:param normalize: Returns relative frequencies if normalize is True
:type normalize: bool
:return: pandas.Series -- Entity names as index and their counts as
values sorted descending
"""
if self._entities is None:
raise NoRelevantData('Relevant entity data is not available!')
ent = self._entities
ent = ent[ent.entity_type.str.startswith(entity_type)]['entity_name']
result = ent.value_counts(normalize=normalize, ascending=True)[:n]
result._plot_id = codes.LEAST_COMMON_ENTITIES
result.index.name = _capitalize(entity_type) or 'Entity'
return result
    def co_occurring_entities(self, entity, n=15, entity_type=''):
        """Find *n* entities co-occurring frequently in texts of this dataset
        with given entity, sorted descending. Co-occurring entities can be
        filtered by their type.
        :param entity: Entity to inspect for co-occurring entities
        :type entity: str
        :param n: Maximum count of returned entities
        :type n: int
        :param entity_type: Limit co-occurring entity counts only to this type
         of entities.
        :type entity_type: str
        :return: pandas.Series -- Co-occurring entity names as index and their
         counts as values sorted descending
        """
        if self._entities is None:
            raise NoRelevantData('Relevant entity data is not available!')
        index_name = _capitalize(entity_type) or 'Entity'
        ent, doc_txt = self._entities, ['doc_id', 'text_order']
        # case-insensitive match against the inspected entity
        entity_filter = ent.entity_name.str.lower() == entity.lower()
        docs = ent[entity_filter][doc_txt].drop_duplicates()
        if docs.shape[0] == 0:
            # entity never occurs: empty result
            result = pd.Series([]).rename('Count')
            result.index.name = index_name
            return result
        docs = docs.set_index(doc_txt)
        type_filter = ent.entity_type.str.startswith(entity_type)
        # drop the inspected entity itself, then keep only rows from texts
        # that mention it (inner join on (doc_id, text_order))
        ent = ent[type_filter & (entity_filter == False)].set_index(doc_txt)
        result = docs.join(ent, how='inner')
        if result.shape[0] == 0:
            result = pd.Series([]).rename('Count')
            result.index.name = index_name
            return result
        result = result.groupby('entity_name').size().rename('Count')
        result = result.sort_values(ascending=False)[:n]
        result._plot_id = codes.CO_OCCURING_ENTITIES
        result._entity = entity
        result.index.name = index_name
        return result
def best_rated_entities(self, n=15, entity_type=''):
"""Find top *n* rated entities in this dataset sorted descending
by their mean rating.
:param n: Maximum count of returned entities
:type n: int
:param entity_type: Optional filter for entity type to consider
:type entity_type: str
:return: pandas.Series -- Best rated entities in this dataset as
index and their ratings as values sorted descending
"""
if self._relations is None or self._relations_entities is None:
raise NoRelevantData('Relevant relation data is not available!')
idx = ['doc_id', 'text_order', 'relation_id']
rels, ents = self._relations, self._relations_entities
rels = rels[rels.sentiment_value.abs() < 100]
ent_evals = pd.merge(rels, ents, 'inner', on=idx)
ent_evals = ent_evals[ent_evals.entity_type.str.startswith(entity_type)]
agg = {'sentiment_value': 'mean'}
mean_evals = ent_evals.groupby('entity_name').agg(agg)
mean_evals = mean_evals.sentiment_value.rename('Sentiment')
result = mean_evals.sort_values(ascending=False)[:n]
result._plot_id = codes.BEST_RATED_ENTITIES
result.index.name = _capitalize(entity_type) or 'Entity'
return result
    def worst_rated_entities(self, n=15, entity_type=''):
        """Find *n* worst rated entities in this dataset sorted ascending
        by their mean rating.

        :param n: Maximum count of returned entities
        :type n: int
        :param entity_type: Optional filter for entity type to consider
        :type entity_type: str
        :return: pandas.Series -- Worst rated entities in this dataset as
         index and their ratings as values sorted ascending
        """
        if self._relations is None or self._relations_entities is None:
            raise NoRelevantData('Relevant relation data is not available!')
        idx = ['doc_id', 'text_order', 'relation_id']
        rels, ents = self._relations, self._relations_entities
        # Ignore extreme sentiment values (|v| >= 100); presumably sentinel
        # ratings -- confirm against the API documentation.
        rels = rels[rels.sentiment_value.abs() < 100]
        ent_evals = rels.set_index(idx).join(ents.set_index(idx)).reset_index()
        ent_evals = ent_evals[ent_evals.entity_type.str.startswith(entity_type)]
        agg = {'sentiment_value': 'mean'}
        mean_evals = ent_evals.groupby('entity_name').agg(agg)
        mean_evals = mean_evals.sentiment_value.rename('Sentiment')
        # Ascending sort: the most negative mean ratings come first.
        result = mean_evals.sort_values()[:n]
        # Plot metadata consumed by the toolkit's plotting module.
        result._plot_id = codes.WORST_RATED_ENTITIES
        result.index.name = _capitalize(entity_type) or 'Entity'
        return result
def surface_strings(self, entity):
"""Returns list of surface strings for each entity specified in *entity*
as a dictionary.
:param entity: Name of entities to find in normalized texts
:type entity: tuple, list, set or str
:return: dict -- Map where keys are entity names and values are lists
of normalized strings
"""
if self._relations is None or self._relations_entities is None:
raise NoRelevantData('Relevant relation data is not available!')
if not isinstance(entity, (tuple, list, set)):
entity = {entity}
idx = ['doc_id', 'text_order', 'relation_id', 'entity_name']
rels, ents = self._relations, self._relations_entities
ents = ents[ents.entity_name.isin(entity)][idx].drop_duplicates()
grp = pd.merge(rels, ents, on=idx[:3]).groupby('entity_name')
result = {key: [] for key in entity}
result.update({
entity_name: grp.get_group(entity_name)['surface_string'].tolist()
for entity_name in grp.groups
})
return result
def entity_texts(self, entity):
"""Returns list of normalized texts where each *entity* can be found
as a dictionary.
:param entity: Name of entities to find in normalized texts
:type entity: tuple, list, set or str
:return: dict -- Map where keys are concept names and values are lists
of normalized strings
"""
if self._entities is None or self._normalized_texts is None:
raise NoRelevantData('Relevant entity data is not available!')
if not isinstance(entity, (tuple, list, set)):
entity = {entity}
col_filter = ['doc_id', 'text_order', 'entity_name']
ent, texts = self._entities[col_filter], self._normalized_texts
ent = ent[ent.entity_name.isin(entity)].drop_duplicates()
ent_texts = pd.merge(ent, texts, on=['doc_id', 'text_order'])
grp = ent_texts.groupby('entity_name')['normalized_text']
result = {key: [] for key in entity}
result.update({
entity_name: grp.get_group(entity_name).tolist()
for entity_name in grp.groups
})
return result
def entity_sentiment(self, entity):
"""Computes and return mean rating for given entity or entities if list,
tuple or set is given. If input is list or tuple result Series is sorted
as input was.
:param entity: Name(s) of entity(ies) to compute mean sentiment for
:type entity: tuple, list, set or str
:return: pandas.Series -- Mean ratings for entities, np.nan if entity
was not rated. Entity names are in index and their sentiments are
values
"""
if self._relations is None or self._relations_entities is None:
raise NoRelevantData('Relevant relation data is not available!')
idx = ['doc_id', 'text_order', 'relation_id']
rels, ents = self._relations, self._relations_entities
rels = rels[rels.sentiment_value.abs() < 100]
all_ent_evals = pd.merge(rels, ents, on=idx)
if not isinstance(entity, (tuple, list, set)):
entity = {entity}
entity_filter = all_ent_evals.entity_name.isin(set(entity))
entity_evals = all_ent_evals[entity_filter]
means = entity_evals.groupby('entity_name')['sentiment_value'].mean()
means.index.name = 'Entity'
result = means.reindex(list(entity)).rename('Sentiment')
result._plot_id = codes.ENTITY_SENTIMENT
return result
class DatasetLoader(object):
    """Loads analysed data obtained via Anacode API from various formats."""

    def __init__(self, concepts=None, concepts_surface_strings=None,
                 categories=None, sentiments=None,
                 absa_entities=None, absa_normalized_texts=None,
                 absa_relations=None, absa_relations_entities=None,
                 absa_evaluations=None, absa_evaluations_entities=None):
        """Will construct DatasetLoader instance that is aware of what data is
        available to it. Raises ValueError if no data was provided.

        Data frames are expected to have format that corresponds to format that
        :class:`anacode.api.writers.Writer` would write.

        :param concepts: List of found concepts with metadata
        :type concepts: pandas.DataFrame
        :param concepts_surface_strings: List of strings realizing concepts
        :type concepts_surface_strings: pandas.DataFrame
        :param categories: List of document category probabilities
        :type categories: pandas.DataFrame
        :param sentiments: List of document sentiment polarities
        :type sentiments: pandas.DataFrame
        :param absa_entities: List of absa entities used in texts
        :type absa_entities: pandas.DataFrame
        :param absa_normalized_texts: List of Chinese normalized strings
         identified and analyzed by absa
        :type absa_normalized_texts: pandas.DataFrame
        :param absa_relations: List of absa relations with metadata
        :type absa_relations: pandas.DataFrame
        :param absa_relations_entities: List of absa entities used in relations
        :type absa_relations_entities: pandas.DataFrame
        :param absa_evaluations: List of absa evaluations
        :type absa_evaluations: pandas.DataFrame
        :param absa_evaluations_entities: List of absa entities used in
         evaluations
        :type absa_evaluations_entities: pandas.DataFrame
        """
        self.has_categories = categories is not None
        self.has_concepts = concepts is not None or \
            concepts_surface_strings is not None
        self.has_sentiments = sentiments is not None
        self.has_absa = absa_entities is not None or \
            absa_normalized_texts is not None or \
            absa_relations is not None or \
            absa_relations_entities is not None or \
            absa_evaluations is not None or \
            absa_evaluations_entities is not None
        if not (self.has_categories or self.has_concepts or
                self.has_sentiments or self.has_absa):
            raise ValueError('No data provided. Please provide at least one '
                             'valid argument')
        # The has_* flags above already capture availability, so the raw
        # frames (None when absent) can be stored directly.
        self._categories = categories
        self._concepts = concepts
        self._concepts_surface_strings = concepts_surface_strings
        self._sentiments = sentiments
        self._absa_entities = absa_entities
        self._absa_normalized_texts = absa_normalized_texts
        self._absa_relations = absa_relations
        self._absa_relations_entities = absa_relations_entities
        self._absa_evaluations = absa_evaluations
        self._absa_evaluations_entities = absa_evaluations_entities

    def __getitem__(self, item):
        """If item is the name of linguistic dataset known to DatasetLoader,
        it will return the corresponding dataset. If the dataset is not found,
        None is returned. If item is not recognized, a KeyError is thrown.

        :param item: possible values: categories, concepts,
         concepts_surface_strings, sentiments, absa_entities,
         absa_normalized_texts, absa_relations, absa_relations_entities,
         absa_evaluations, absa_evaluations_entities
        :type item: str
        :return: pandas.DataFrame -- DataFrame with requested data if found,
         else None
        """
        dataset_map = {
            'categories': self._categories,
            'concepts': self._concepts,
            'concepts_surface_strings': self._concepts_surface_strings,
            'sentiments': self._sentiments,
            'absa_entities': self._absa_entities,
            'absa_normalized_texts': self._absa_normalized_texts,
            'absa_relations': self._absa_relations,
            'absa_relations_entities': self._absa_relations_entities,
            'absa_evaluations': self._absa_evaluations,
            'absa_evaluations_entities': self._absa_evaluations_entities,
        }
        if item not in dataset_map:
            raise KeyError('Don\'t recognize "{}" dataset'.format(item))
        return dataset_map[item]

    def remove_concepts(self, concepts):
        """Remove given concepts from dataset if they are present.

        :param concepts: These concepts will be removed from dataset
        :type concepts: iterable
        """
        con = self._concepts
        exp = self._concepts_surface_strings
        concepts = set(concepts)
        # "~" negates the boolean mask; clearer than comparing "== False".
        if con is not None:
            self._concepts = con[~con.concept.isin(concepts)]
        if exp is not None:
            self._concepts_surface_strings = exp[~exp.concept.isin(concepts)]

    @property
    def concepts(self):
        """Creates new ConceptsDataset if data is available.

        :return: :class:`anacode.agg.aggregations.ConceptsDataset` --
        """
        if self.has_concepts:
            return ConceptsDataset(self._concepts,
                                   self._concepts_surface_strings)
        else:
            raise NoRelevantData('Concepts data not available!')

    @property
    def categories(self):
        """Creates new CategoriesDataset if data is available.

        :return: :class:`anacode.agg.aggregations.CategoriesDataset` --
        """
        if self.has_categories:
            return CategoriesDataset(self._categories)
        else:
            raise NoRelevantData('Categories data not available!')

    @property
    def sentiments(self):
        """Creates new SentimentDataset if data is available.

        :return: :class:`anacode.agg.aggregations.SentimentDataset` --
        """
        if self.has_sentiments:
            return SentimentDataset(self._sentiments)
        else:
            raise NoRelevantData('Sentiment data is not available!')

    @property
    def absa(self):
        """Creates new ABSADataset if data is available.

        :return: :class:`anacode.agg.aggregations.ABSADataset` --
        """
        if self.has_absa:
            return ABSADataset(
                self._absa_entities, self._absa_normalized_texts,
                self._absa_relations, self._absa_relations_entities,
                self._absa_evaluations, self._absa_evaluations_entities,
            )
        else:
            raise NoRelevantData('ABSA data is not available!')

    @classmethod
    def from_path(cls, path, backup_suffix=''):
        """Initializes DatasetLoader from AnacodeAPI csv files present in given
        path. You could have obtained these by using
        :class:`anacode.api.writers.CSVWriter` to write your request results
        when you were querying AnacodeAPI.

        :param path: Path to folder where AnacodeAPI analysis is stored in csv
         files
        :type path: str
        :param backup_suffix: If you want to load older dataset from file that
         has been backed up by toolkit, use this to specify suffix of file names
        :type backup_suffix: str
        :return: :class:`anacode.agg.DatasetLoader` -- DatasetLoader with found
         csv files loaded into data frames
        """
        log = logging.getLogger(__name__)
        log.debug('Going to init ApiDataset from path %s', path)
        join = os.path.join
        path_contents = set(os.listdir(path))
        log.debug('Found files: %s', path_contents)
        kwargs = {}
        loaded = []
        # Call names in CSV_FILES are irrelevant here; only file lists matter.
        for files in CSV_FILES.values():
            for file_name in files:
                name = file_name[:-4]
                if backup_suffix:
                    # NOTE(review): backup files appear to be named
                    # '<name>.csv_<suffix>' by the toolkit -- confirm.
                    file_name = '%s_%s' % (file_name, backup_suffix)
                file_path = join(path, file_name)
                if os.path.isfile(file_path):
                    kwargs[name] = pd.read_csv(file_path)
                    loaded.append(name)
                else:
                    kwargs[name] = None
        if len(loaded) == 0:
            # Bug fix: the message used to be raised with logging-style args
            # ('%s', path) and was never actually formatted.
            raise ValueError('No relevant csv files in %s' % path)
        else:
            log.info('Loaded %d csv files', len(loaded))
            log.debug('Loaded csv files are: %s', loaded)
        return cls(**kwargs)

    @classmethod
    def from_writer(cls, writer):
        """Initializes DatasetLoader from writer instance that was used to store
        anacode analysis. Accepts both
        :class:`anacode.api.writers.DataFrameWriter` and
        :class:`anacode.api.writers.CSVWriter`.

        :param writer: Writer that was used by
         :class:`anacode.api.client.Analyzer` to store analysis
        :type writer: anacode.api.writers.Writer
        :return: :class:`anacode.agg.DatasetLoader` -- DatasetLoader with
         available data frames loaded
        """
        if isinstance(writer, writers.CSVWriter):
            return cls.from_path(writer.target_dir)
        elif isinstance(writer, writers.DataFrameWriter):
            return cls(**writer.frames)
        else:
            raise ValueError('{} class not supported'.format(type(writer)))

    @classmethod
    def from_api_result(cls, result):
        """Initializes DatasetLoader from API JSON output. Works with both
        single analysis result and with list of analyses results.

        :param result: Either single API JSON analysis dict or list of them
        :return: :class:`anacode.agg.DatasetLoader` -- DatasetLoader with
         available analysis data loaded
        """
        frame_writer = writers.DataFrameWriter()
        frame_writer.init()
        if isinstance(result, list):
            for analysis in result:
                frame_writer.write_analysis(analysis)
        else:
            frame_writer.write_analysis(result)
        frame_writer.close()
        return cls(**frame_writer.frames)

    def filter(self, document_ids):
        """Creates new DatasetLoader instance using data only from documents
        with ids in *document_ids*.

        :param document_ids: Iterable with document ids. Cannot be empty.
        :type document_ids: iterable
        :return: DatasetLoader -- New DatasetLoader instance with data only from
         desired documents
        """
        document_ids = set(document_ids)
        if len(document_ids) == 0:
            raise ValueError('Can\'t use empty filter')

        def f(frame):
            # Helper: restrict a frame to the selected documents; passes
            # through None for datasets that were never loaded.
            if frame is None:
                return None
            return frame[frame.doc_id.isin(document_ids)]

        return DatasetLoader(
            concepts=f(self._concepts),
            concepts_surface_strings=f(self._concepts_surface_strings),
            categories=f(self._categories), sentiments=f(self._sentiments),
            absa_entities=f(self._absa_entities),
            absa_normalized_texts=f(self._absa_normalized_texts),
            absa_evaluations=f(self._absa_evaluations),
            absa_evaluations_entities=f(self._absa_evaluations_entities),
            absa_relations=f(self._absa_relations),
            absa_relations_entities=f(self._absa_relations_entities),
        )
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for folding batch norm layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
# Short aliases for the contrib layer constructors used throughout the tests.
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
# Batch norm arguments shared by all tests. 'fused' is False because
# FoldBatchNorms rejects fused batch norm (exercised by
# testFailsWithFusedBatchNorm below).
_DEFAULT_BATCH_NORM_PARAMS = {
    'center': True,
    'scale': True,
    'decay': 1.0 - 0.003,
    'fused': False,
}
# TODO(suharshs): Use parameterized test once OSS TF supports it.
class FoldBatchNormsTest(test_util.TensorFlowTestCase):
def _RunTestOverParameters(self, test_fn):
parameters_list = [
# (relu, relu_op_name, with_bypass)
(nn_ops.relu6, 'Relu6', False),
(nn_ops.relu, 'Relu', False),
(nn_ops.relu6, 'Relu6', True),
(nn_ops.relu, 'Relu', True),
]
for parameters in parameters_list:
test_fn(parameters[0], parameters[1], parameters[2])
def testFailsWithFusedBatchNorm(self):
self._RunTestOverParameters(self._TestFailsWithFusedBatchNorm)
def _TestFailsWithFusedBatchNorm(self, relu, relu_op_name, with_bypass):
"""Tests that batch norm fails when fused batch norm ops are present."""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
batch_norm_params = _DEFAULT_BATCH_NORM_PARAMS.copy()
batch_norm_params['fused'] = True
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(inputs, out_depth, [5, 5], stride=stride, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=batch_norm_params,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
with self.assertRaises(ValueError):
fold_batch_norms.FoldBatchNorms(g)
def _TestFoldConv2d(self, relu, relu_op_name, with_bypass):
"""Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(inputs, out_depth, [5, 5], stride=stride, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=_DEFAULT_BATCH_NORM_PARAMS,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/weights/read',
scope + '/BatchNorm/batchnorm/mul'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/convolution_Fold'])
folded_conv = g.get_operation_by_name(scope + '/convolution_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/convolution_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldConv2d(self):
self._RunTestOverParameters(self._TestFoldConv2d)
def _TestFoldConv2dUnknownShape(self, relu, relu_op_name, with_bypass):
"""Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.
Tests that folding works even with an input shape where some dimensions are
not known (i.e. None).
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
inputs = array_ops.placeholder(dtypes.float32, shape=(5, None, None, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=_DEFAULT_BATCH_NORM_PARAMS,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul, [
scope + '/weights/read', scope + '/BatchNorm/batchnorm/mul'
])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/convolution_Fold'])
folded_conv = g.get_operation_by_name(scope + '/convolution_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
self._AssertInputOpsAre(folded_conv, [scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
scope + '/convolution_Fold', scope + '/BatchNorm/batchnorm/sub'
])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldConv2dUnknownShape(self):
self._RunTestOverParameters(self._TestFoldConv2dUnknownShape)
def _TestFoldConv2dWithoutScale(self, relu, relu_op_name, with_bypass):
"""Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
bn_params = copy.copy(_DEFAULT_BATCH_NORM_PARAMS)
bn_params['scale'] = False
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(inputs, out_depth, [5, 5], stride=stride, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/weights/read',
scope + '/BatchNorm/batchnorm/Rsqrt'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/convolution_Fold'])
folded_conv = g.get_operation_by_name(scope + '/convolution_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/convolution_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldConv2dWithoutScale(self):
self._RunTestOverParameters(self._TestFoldConv2dWithoutScale)
def _TestFoldFullyConnectedLayer(self, relu, relu_op_name, with_bypass):
"""Tests folding cases: inputs -> FC with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = fully_connected(inputs, out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=_DEFAULT_BATCH_NORM_PARAMS,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/weights/read',
scope + '/BatchNorm/batchnorm/mul'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/MatMul_Fold'])
folded_conv = g.get_operation_by_name(scope + '/MatMul_Fold')
self.assertEqual(folded_conv.type, 'MatMul')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/MatMul_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldFullyConnectedLayer(self):
self._RunTestOverParameters(self._TestFoldFullyConnectedLayer)
def _TestFoldFullyConnectedLayerWithoutScale(self, relu, relu_op_name,
with_bypass):
"""Tests folding cases: inputs -> FC with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else relu
bn_params = copy.copy(_DEFAULT_BATCH_NORM_PARAMS)
bn_params['scale'] = False
scope = 'test/test2' if with_bypass else 'test'
node = fully_connected(inputs, out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/weights/read',
scope + '/BatchNorm/batchnorm/Rsqrt'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/MatMul_Fold'])
folded_conv = g.get_operation_by_name(scope + '/MatMul_Fold')
self.assertEqual(folded_conv.type, 'MatMul')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/MatMul_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldFullyConnectedLayerWithoutScale(self):
self._RunTestOverParameters(self._TestFoldFullyConnectedLayerWithoutScale)
def _TestFoldDepthwiseConv2d(self, relu, relu_op_name, with_bypass):
"""Tests folding: inputs -> DepthwiseConv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(inputs, None, [5, 5], stride=stride,
depth_multiplier=1.0, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=_DEFAULT_BATCH_NORM_PARAMS,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/depthwise_weights/read',
scope + '/scale_reshape'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/depthwise_Fold'])
scale_reshape = g.get_operation_by_name(scope + '/scale_reshape')
self.assertEqual(scale_reshape.type, 'Reshape')
self._AssertInputOpsAre(scale_reshape,
[scope + '/BatchNorm/batchnorm/mul',
scope + '/scale_reshape/shape'])
self._AssertOutputGoesToOps(scale_reshape, g, [scope + '/mul_fold'])
folded_conv = g.get_operation_by_name(scope + '/depthwise_Fold')
self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/depthwise_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
def testFoldDepthwiseConv2d(self):
self._RunTestOverParameters(self._TestFoldDepthwiseConv2d)
def _TestFoldDepthwiseConv2dWithoutScale(self, relu, relu_op_name,
with_bypass):
"""Tests folding: inputs -> DepthwiseConv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
bn_params = copy.copy(_DEFAULT_BATCH_NORM_PARAMS)
bn_params['scale'] = False
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(inputs, None, [5, 5], stride=stride,
depth_multiplier=1.0, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(g)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul,
[scope + '/depthwise_weights/read',
scope + '/scale_reshape'])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/depthwise_Fold'])
scale_reshape = g.get_operation_by_name(scope + '/scale_reshape')
self.assertEqual(scale_reshape.type, 'Reshape')
self._AssertInputOpsAre(scale_reshape,
[scope + '/BatchNorm/batchnorm/Rsqrt',
scope + '/scale_reshape/shape'])
self._AssertOutputGoesToOps(scale_reshape, g, [scope + '/mul_fold'])
folded_conv = g.get_operation_by_name(scope + '/depthwise_Fold')
self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/add_fold'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add,
[scope + '/depthwise_Fold',
scope + '/BatchNorm/batchnorm/sub'])
output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
  def testFoldDepthwiseConv2dWithoutScale(self):
    # Run the depthwise-conv2d folding check (batch norm with scale=False)
    # over every parameter combination supplied by _RunTestOverParameters.
    self._RunTestOverParameters(self._TestFoldDepthwiseConv2dWithoutScale)
def _WeightInit(self, stddev):
"""Returns a truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initializer that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should
come from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  googletest.main()
|
|
#!/usr/bin/env python
# This example demonstrates the use of the vtkTransformPolyDataFilter
# to reposition a 3D text string.
import vtk
from vtk.util.colors import *
# Define a Single Cube
# Scalar values for the cube's 8 corners; these are overwritten later by
# case12() before rendering.
Scalars = vtk.vtkFloatArray()
Scalars.InsertNextValue(1.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(1.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
# The 8 corner coordinates of a unit cube.
Points = vtk.vtkPoints()
Points.InsertNextPoint(0, 0, 0)
Points.InsertNextPoint(1, 0, 0)
Points.InsertNextPoint(1, 1, 0)
Points.InsertNextPoint(0, 1, 0)
Points.InsertNextPoint(0, 0, 1)
Points.InsertNextPoint(1, 0, 1)
Points.InsertNextPoint(1, 1, 1)
Points.InsertNextPoint(0, 1, 1)
# Point ids 0-7 defining the single cell below.
Ids = vtk.vtkIdList()
Ids.InsertNextId(0)
Ids.InsertNextId(1)
Ids.InsertNextId(2)
Ids.InsertNextId(3)
Ids.InsertNextId(4)
Ids.InsertNextId(5)
Ids.InsertNextId(6)
Ids.InsertNextId(7)
Grid = vtk.vtkUnstructuredGrid()
Grid.Allocate(10, 10)
# 12 is the VTK cell type id for a hexahedron (VTK_HEXAHEDRON).
Grid.InsertNextCell(12, Ids)
Grid.SetPoints(Points)
Grid.GetPointData().SetScalars(Scalars)
# Find the triangles that lie along the 0.5 contour in this cube.
Marching = vtk.vtkContourFilter()
Marching.SetInputData(Grid)
Marching.SetValue(0, 0.5)
Marching.Update()
# Extract the edges of the triangles just found.
triangleEdges = vtk.vtkExtractEdges()
triangleEdges.SetInputConnection(Marching.GetOutputPort())
# Draw the edges as tubes instead of lines. Also create the associated
# mapper and actor to display the tubes.
triangleEdgeTubes = vtk.vtkTubeFilter()
triangleEdgeTubes.SetInputConnection(triangleEdges.GetOutputPort())
triangleEdgeTubes.SetRadius(.005)
triangleEdgeTubes.SetNumberOfSides(6)
triangleEdgeTubes.UseDefaultNormalOn()
triangleEdgeTubes.SetDefaultNormal(.577, .577, .577)
triangleEdgeMapper = vtk.vtkPolyDataMapper()
triangleEdgeMapper.SetInputConnection(triangleEdgeTubes.GetOutputPort())
# Color comes from the actor property, not the contour scalars.
triangleEdgeMapper.ScalarVisibilityOff()
triangleEdgeActor = vtk.vtkActor()
triangleEdgeActor.SetMapper(triangleEdgeMapper)
triangleEdgeActor.GetProperty().SetDiffuseColor(lamp_black)
triangleEdgeActor.GetProperty().SetSpecular(.4)
triangleEdgeActor.GetProperty().SetSpecularPower(10)
# Shrink the triangles we found earlier. Create the associated mapper
# and actor. Set the opacity of the shrunken triangles.
# NOTE(review): a shrink factor of 1 leaves the triangles unshrunk;
# values below 1 would actually shrink them — confirm intent.
aShrinker = vtk.vtkShrinkPolyData()
aShrinker.SetShrinkFactor(1)
aShrinker.SetInputConnection(Marching.GetOutputPort())
aMapper = vtk.vtkPolyDataMapper()
aMapper.ScalarVisibilityOff()
aMapper.SetInputConnection(aShrinker.GetOutputPort())
Triangles = vtk.vtkActor()
Triangles.SetMapper(aMapper)
Triangles.GetProperty().SetDiffuseColor(banana)
Triangles.GetProperty().SetOpacity(.6)
# Draw a cube the same size and at the same position as the one
# created previously. Extract the edges because we only want to see
# the outline of the cube. Pass the edges through a vtkTubeFilter so
# they are displayed as tubes rather than lines.
CubeModel = vtk.vtkCubeSource()
# Centered at (.5, .5, .5) so it coincides with the unit cube above.
CubeModel.SetCenter(.5, .5, .5)
Edges = vtk.vtkExtractEdges()
Edges.SetInputConnection(CubeModel.GetOutputPort())
Tubes = vtk.vtkTubeFilter()
Tubes.SetInputConnection(Edges.GetOutputPort())
Tubes.SetRadius(.01)
Tubes.SetNumberOfSides(6)
Tubes.UseDefaultNormalOn()
Tubes.SetDefaultNormal(.577, .577, .577)
# Create the mapper and actor to display the cube edges.
TubeMapper = vtk.vtkPolyDataMapper()
TubeMapper.SetInputConnection(Tubes.GetOutputPort())
CubeEdges = vtk.vtkActor()
CubeEdges.SetMapper(TubeMapper)
CubeEdges.GetProperty().SetDiffuseColor(khaki)
CubeEdges.GetProperty().SetSpecular(.4)
CubeEdges.GetProperty().SetSpecularPower(10)
# Create a sphere to use as a glyph source for vtkGlyph3D.
Sphere = vtk.vtkSphereSource()
Sphere.SetRadius(0.04)
Sphere.SetPhiResolution(20)
Sphere.SetThetaResolution(20)
# Remove the part of the cube with data values below 0.5.
ThresholdIn = vtk.vtkThresholdPoints()
ThresholdIn.SetInputData(Grid)
ThresholdIn.ThresholdByUpper(.5)
# Display spheres at the vertices remaining in the cube data set after
# it was passed through vtkThresholdPoints.
Vertices = vtk.vtkGlyph3D()
Vertices.SetInputConnection(ThresholdIn.GetOutputPort())
Vertices.SetSourceConnection(Sphere.GetOutputPort())
# Create a mapper and actor to display the glyphs.
SphereMapper = vtk.vtkPolyDataMapper()
SphereMapper.SetInputConnection(Vertices.GetOutputPort())
SphereMapper.ScalarVisibilityOff()
CubeVertices = vtk.vtkActor()
CubeVertices.SetMapper(SphereMapper)
# Fix: the diffuse color was previously set twice in a row; once suffices.
CubeVertices.GetProperty().SetDiffuseColor(tomato)
# Define the text for the label
caseLabel = vtk.vtkVectorText()
# Placeholder text; it is replaced by case12() below before rendering.
caseLabel.SetText("Case 1")
# Set up a transform to move the label to a new position.
aLabelTransform = vtk.vtkTransform()
aLabelTransform.Identity()
aLabelTransform.Translate(-0.2, 0, 1.25)
aLabelTransform.Scale(.05, .05, .05)
# Move the label to a new position.
labelTransform = vtk.vtkTransformPolyDataFilter()
labelTransform.SetTransform(aLabelTransform)
labelTransform.SetInputConnection(caseLabel.GetOutputPort())
# Create a mapper and actor to display the text.
labelMapper = vtk.vtkPolyDataMapper()
labelMapper.SetInputConnection(labelTransform.GetOutputPort())
labelActor = vtk.vtkActor()
labelActor.SetMapper(labelMapper)
# Define the base that the cube sits on. Create its associated mapper
# and actor. Set the position of the actor.
baseModel = vtk.vtkCubeSource()
baseModel.SetXLength(1.5)
baseModel.SetYLength(.01)
baseModel.SetZLength(1.5)
baseMapper = vtk.vtkPolyDataMapper()
baseMapper.SetInputConnection(baseModel.GetOutputPort())
base = vtk.vtkActor()
base.SetMapper(baseMapper)
base.SetPosition(.5, -0.09, .5)
# Create the Renderer, RenderWindow, and RenderWindowInteractor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(640, 480)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer
ren.AddActor(triangleEdgeActor)
ren.AddActor(base)
ren.AddActor(labelActor)
ren.AddActor(CubeEdges)
ren.AddActor(CubeVertices)
ren.AddActor(Triangles)
# Set the background color.
ren.SetBackground(slate_grey)
# This sets up the right values for case12 of the marching cubes
# algorithm (routine translated from vtktesting/mccases.tcl).
def case12(scalars, caselabel, IN, OUT):
    """Assign the case-12 corner scalars and update the label text.

    Corners 1, 3, 4 and 5 receive IN; the remaining corners receive OUT.
    The label reflects which of the two complementary bit patterns is
    active (IN == 1 selects '00111010', otherwise '11000101').
    """
    inside_corners = (1, 3, 4, 5)
    for corner in range(8):
        scalars.InsertValue(corner, IN if corner in inside_corners else OUT)
    if IN == 1:
        caselabel.SetText("Case 12 - 00111010")
    else:
        caselabel.SetText("Case 12 - 11000101")
# Set the scalar values for this case of marching cubes.
case12(Scalars, caseLabel, 0, 1)
# Force the grid to update.
Grid.Modified()
# Position the camera.
ren.ResetCamera()
ren.GetActiveCamera().Dolly(1.2)
ren.GetActiveCamera().Azimuth(30)
ren.GetActiveCamera().Elevation(20)
# Recompute the clipping range after moving the camera so geometry
# is not clipped away.
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
# Enter the interaction loop (blocks until the window is closed).
iren.Start()
|
|
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import numpy as np
from ..utils import check_random_state, array2d
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
    """Sparse Principal Components Analysis (SparsePCA)

    Finds a set of sparse components that optimally reconstruct the data.
    The degree of sparseness is tuned through ``alpha``, the coefficient
    of the L1 penalty.

    Parameters
    ----------
    n_components : int,
        Number of sparse atoms to extract.

    alpha : float,
        Sparsity controlling parameter. Higher values lead to sparser
        components.

    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.

    max_iter : int,
        Maximum number of iterations to perform.

    tol : float,
        Tolerance for the stopping condition.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    n_jobs : int,
        Number of parallel jobs to run.

    U_init : array of shape (n_samples, n_components),
        Initial values for the loadings for warm restart scenarios.

    V_init : array of shape (n_components, n_features),
        Initial values for the components for warm restart scenarios.

    verbose :
        Degree of verbosity of the printed output.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        Sparse components extracted from the data.

    `error_` : array
        Vector of errors at each iteration.

    See also
    --------
    PCA
    MiniBatchSparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
                 V_init=None, verbose=False, random_state=None):
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        self.n_jobs = n_jobs
        self.U_init = U_init
        self.V_init = V_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        rng = check_random_state(self.random_state)
        X = array2d(X)
        n_components = (X.shape[1] if self.n_components is None
                        else self.n_components)
        # Warm-start values are transposed to match the dict_learning layout.
        code_init = None if self.V_init is None else self.V_init.T
        dict_init = None if self.U_init is None else self.U_init.T
        # The sparse components are obtained by running dictionary learning
        # on the transposed data matrix.
        Vt, _, E = dict_learning(X.T, n_components, self.alpha,
                                 tol=self.tol, max_iter=self.max_iter,
                                 method=self.method, n_jobs=self.n_jobs,
                                 verbose=self.verbose,
                                 random_state=rng,
                                 code_init=code_init,
                                 dict_init=dict_init)
        self.components_ = Vt.T
        self.error_ = E
        return self

    def transform(self, X, ridge_alpha=None):
        """Least Squares projection of the data onto the sparse components.

        To avoid instability issues in case the system is under-determined,
        regularization can be applied (Ridge regression) via the
        `ridge_alpha` parameter.

        Note that Sparse PCA components orthogonality is not enforced as in
        PCA hence one cannot use a simple linear projection.

        Parameters
        ----------
        X: array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.

        ridge_alpha: float, default: None
            Amount of ridge shrinkage to apply in order to improve
            conditioning; when None, ``self.ridge_alpha`` is used.

        Returns
        -------
        X_new array, shape (n_samples, n_components)
            Transformed data.
        """
        if ridge_alpha is None:
            ridge_alpha = self.ridge_alpha
        U = ridge_regression(self.components_.T, X.T, ridge_alpha,
                             solver='cholesky')
        # Rescale each component's loadings to unit norm, guarding against
        # division by zero for components that received no weight.
        norms = np.sqrt((U ** 2).sum(axis=0))
        norms[norms == 0] = 1
        U /= norms
        return U
class MiniBatchSparsePCA(SparsePCA):
    """Mini-batch Sparse Principal Components Analysis

    Variant of SparsePCA that estimates the sparse components with online
    (mini-batch) dictionary learning, which scales better to large data.
    Sparseness is controlled by ``alpha``, the L1 penalty coefficient.

    Parameters
    ----------
    n_components : int,
        number of sparse atoms to extract

    alpha : int,
        Sparsity controlling parameter. Higher values lead to sparser
        components.

    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.

    n_iter : int,
        number of iterations to perform for each mini batch

    callback : callable,
        callable that gets invoked every five iterations

    batch_size : int,
        the number of features to take in each mini batch

    verbose :
        degree of output the procedure will print

    shuffle : boolean,
        whether to shuffle the data before splitting it in batches

    n_jobs : int,
        number of parallel jobs to run, or -1 to autodetect.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        Sparse components extracted from the data.

    `error_` : array
        Vector of errors at each iteration.

    See also
    --------
    PCA
    SparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 n_iter=100, callback=None, batch_size=3, verbose=False,
                 shuffle=True, n_jobs=1, method='lars', random_state=None):
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.n_iter = n_iter
        self.callback = callback
        self.batch_size = batch_size
        self.verbose = verbose
        self.shuffle = shuffle
        self.n_jobs = n_jobs
        self.method = method
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        rng = check_random_state(self.random_state)
        X = array2d(X)
        n_components = (X.shape[1] if self.n_components is None
                        else self.n_components)
        # Components are learned by running online dictionary learning on
        # the transposed data matrix, so the dictionary lives in feature
        # space; the learned code transposes back into components.
        Vt, _ = dict_learning_online(X.T, n_components, alpha=self.alpha,
                                     n_iter=self.n_iter, return_code=True,
                                     dict_init=None, verbose=self.verbose,
                                     callback=self.callback,
                                     batch_size=self.batch_size,
                                     shuffle=self.shuffle,
                                     n_jobs=self.n_jobs, method=self.method,
                                     random_state=rng)
        self.components_ = Vt.T
        return self
|
|
import math
import unittest
from monty.serialization import MontyDecoder
from pymatgen.core.operations import SymmOp
from pymatgen.core.tensors import *
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
class TensorTest(PymatgenTest):
    """Unit tests for pymatgen's Tensor class (construction, symmetry,
    IEEE conversion, Voigt notation, serialization)."""

    _multiprocess_shared_ = True

    def setUp(self):
        """Build tensor fixtures of ranks 1-4 plus BaNiO3 reference data."""
        self.vec = Tensor([1.0, 0.0, 0.0])
        self.rand_rank2 = Tensor(np.random.randn(3, 3))
        self.rand_rank3 = Tensor(np.random.randn(3, 3, 3))
        self.rand_rank4 = Tensor(np.random.randn(3, 3, 3, 3))
        a = 3.14 * 42.5 / 180
        self.non_symm = SquareTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.2, 0.5, 0.5]])
        self.rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]])
        self.low_val = Tensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-5]])
        self.symm_rank2 = Tensor([[1, 2, 3], [2, 4, 5], [3, 5, 6]])
        self.symm_rank3 = Tensor(
            [
                [[1, 2, 3], [2, 4, 5], [3, 5, 6]],
                [[2, 4, 5], [4, 7, 8], [5, 8, 9]],
                [[3, 5, 6], [5, 8, 9], [6, 9, 10]],
            ]
        )
        self.symm_rank4 = Tensor(
            [
                [
                    [[1.2, 0.4, -0.92], [0.4, 0.05, 0.11], [-0.92, 0.11, -0.02]],
                    [[0.4, 0.05, 0.11], [0.05, -0.47, 0.09], [0.11, 0.09, -0.0]],
                    [[-0.92, 0.11, -0.02], [0.11, 0.09, 0.0], [-0.02, 0.0, -0.3]],
                ],
                [
                    [[0.4, 0.05, 0.11], [0.05, -0.47, 0.09], [0.11, 0.09, 0.0]],
                    [[0.05, -0.47, 0.09], [-0.47, 0.17, 0.62], [0.09, 0.62, 0.3]],
                    [[0.11, 0.09, 0.0], [0.09, 0.62, 0.3], [0.0, 0.3, -0.18]],
                ],
                [
                    [[-0.92, 0.11, -0.02], [0.11, 0.09, 0.0], [-0.02, 0, -0.3]],
                    [[0.11, 0.09, 0.0], [0.09, 0.62, 0.3], [0.0, 0.3, -0.18]],
                    [[-0.02, 0.0, -0.3], [0.0, 0.3, -0.18], [-0.3, -0.18, -0.51]],
                ],
            ]
        )
        # Structural symmetries tested using BaNiO3 piezo/elastic tensors
        self.fit_r3 = Tensor(
            [
                [[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
                [[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
                [[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
            ]
        )
        self.fit_r4 = Tensor(
            [
                [
                    [[157.9, 0.0, 0.0], [0.0, 63.1, 0.0], [0.0, 0.0, 29.4]],
                    [[0.0, 47.4, 0.0], [47.4, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 4.3], [0.0, 0.0, 0.0], [4.3, 0.0, 0.0]],
                ],
                [
                    [[0.0, 47.4, 0.0], [47.4, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[63.1, 0.0, 0.0], [0.0, 157.9, 0.0], [0.0, 0.0, 29.4]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 4.3], [0.0, 4.3, 0.0]],
                ],
                [
                    [[0.0, 0.0, 4.3], [0.0, 0.0, 0.0], [4.3, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 4.3], [0.0, 4.3, 0.0]],
                    [[29.4, 0.0, 0.0], [0.0, 29.4, 0.0], [0.0, 0.0, 207.6]],
                ],
            ]
        )
        # Like fit_r4 but slightly perturbed, so it does NOT satisfy the
        # structure's symmetry until fit_to_structure is applied.
        self.unfit4 = Tensor(
            [
                [
                    [[161.26, 0.0, 0.0], [0.0, 62.76, 0.0], [0.0, 0.0, 30.18]],
                    [[0.0, 47.08, 0.0], [47.08, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 4.23], [0.0, 0.0, 0.0], [4.23, 0.0, 0.0]],
                ],
                [
                    [[0.0, 47.08, 0.0], [47.08, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[62.76, 0.0, 0.0], [0.0, 155.28, -0.06], [0.0, -0.06, 28.53]],
                    [[0.0, 0.0, 0.0], [0.0, -0.06, 4.44], [0.0, 4.44, 0.0]],
                ],
                [
                    [[0.0, 0.0, 4.23], [0.0, 0.0, 0.0], [4.23, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, -0.06, 4.44], [0.0, 4.44, 0.0]],
                    [[30.18, 0.0, 0.0], [0.0, 28.53, 0.0], [0.0, 0.0, 207.57]],
                ],
            ]
        )
        self.structure = self.get_structure("BaNiO3")
        ieee_file_path = os.path.join(PymatgenTest.TEST_FILES_DIR, "ieee_conversion_data.json")
        self.ones = Tensor(np.ones((3, 3)))
        self.ieee_data = loadfn(ieee_file_path)

    def test_new(self):
        """Non-3x...x3 arrays are rejected; rank is inferred from shape."""
        bad_2 = np.zeros((4, 4))
        bad_3 = np.zeros((4, 4, 4))
        self.assertRaises(ValueError, Tensor, bad_2)
        self.assertRaises(ValueError, Tensor, bad_3)
        self.assertEqual(self.rand_rank2.rank, 2)
        self.assertEqual(self.rand_rank3.rank, 3)
        self.assertEqual(self.rand_rank4.rank, 4)

    def test_zeroed(self):
        """Entries below the (default or given) tolerance are zeroed out."""
        self.assertArrayEqual(
            self.low_val.zeroed(),
            Tensor([[0, 1 + 1e-5, 0], [1 + 1e-6, 0, 0], [0, 0, 1 + 1e-5]]),
        )
        self.assertArrayEqual(
            self.low_val.zeroed(tol=1e-6),
            Tensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [0, 0, 1 + 1e-5]]),
        )
        self.assertArrayEqual(
            Tensor([[1e-6, -30, 1], [1e-7, 1, 0], [1e-8, 0, 1]]).zeroed(),
            Tensor([[0, -30, 1], [0, 1, 0], [0, 0, 1]]),
        )

    def test_transform(self):
        """Transforming a rank-3 tensor by a SymmOp matches reference values."""
        # Rank 3
        tensor = Tensor(np.arange(0, 27).reshape(3, 3, 3))
        symm_op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1])
        new_tensor = tensor.transform(symm_op)
        self.assertArrayAlmostEqual(
            new_tensor,
            [
                [
                    [-0.871, -2.884, -1.928],
                    [-2.152, -6.665, -4.196],
                    [-1.026, -2.830, -1.572],
                ],
                [
                    [0.044, 1.531, 1.804],
                    [4.263, 21.008, 17.928],
                    [5.170, 23.026, 18.722],
                ],
                [
                    [1.679, 7.268, 5.821],
                    [9.268, 38.321, 29.919],
                    [8.285, 33.651, 26.000],
                ],
            ],
            3,
        )

    def test_rotate(self):
        """Rotation by a matrix works; non-rotation matrices raise ValueError."""
        self.assertArrayEqual(self.vec.rotate([[0, -1, 0], [1, 0, 0], [0, 0, 1]]), [0, 1, 0])
        self.assertArrayAlmostEqual(
            self.non_symm.rotate(self.rotation),
            SquareTensor([[0.531, 0.485, 0.271], [0.700, 0.5, 0.172], [0.171, 0.233, 0.068]]),
            decimal=3,
        )
        self.assertRaises(ValueError, self.non_symm.rotate, self.symm_rank2)

    def test_einsum_sequence(self):
        """Sequential einsum contraction over a list of arrays."""
        x = [1, 0, 0]
        test = Tensor(np.arange(0, 3 ** 4).reshape((3, 3, 3, 3)))
        self.assertArrayAlmostEqual([0, 27, 54], test.einsum_sequence([x] * 3))
        self.assertEqual(360, test.einsum_sequence([np.eye(3)] * 2))
        self.assertRaises(ValueError, test.einsum_sequence, Tensor(np.zeros(3)))

    def test_symmetrized(self):
        """The symmetrized property yields an index-symmetric tensor."""
        self.assertTrue(self.rand_rank2.symmetrized.is_symmetric())
        self.assertTrue(self.rand_rank3.symmetrized.is_symmetric())
        self.assertTrue(self.rand_rank4.symmetrized.is_symmetric())

    def test_is_symmetric(self):
        self.assertTrue(self.symm_rank2.is_symmetric())
        self.assertTrue(self.symm_rank3.is_symmetric())
        self.assertTrue(self.symm_rank4.is_symmetric())
        # NOTE(review): tol_test aliases (does not copy) self.symm_rank4 and
        # mutates the shared fixture in place, and no assertion is ever made
        # on it — likely missing a .copy() and an is_symmetric(tol=...) check.
        tol_test = self.symm_rank4
        tol_test[0, 1, 2, 2] += 1e-6
        self.assertFalse(self.low_val.is_symmetric(tol=1e-8))

    def test_fit_to_structure(self):
        """Fitting an unfit tensor to the structure recovers the fit tensor."""
        new_fit = self.unfit4.fit_to_structure(self.structure)
        self.assertArrayAlmostEqual(new_fit, self.fit_r4, 1)

    def test_is_fit_to_structure(self):
        self.assertFalse(self.unfit4.is_fit_to_structure(self.structure))
        self.assertTrue(self.fit_r3.is_fit_to_structure(self.structure))
        self.assertTrue(self.fit_r4.is_fit_to_structure(self.structure))

    def test_convert_to_ieee(self):
        """IEEE conversion matches reference data, with and without rotation refinement."""
        for entry in self.ieee_data:
            xtal = entry["xtal"]
            struct = entry["structure"]
            orig = Tensor(entry["original_tensor"])
            ieee = Tensor(entry["ieee_tensor"])
            diff = np.max(abs(ieee - orig.convert_to_ieee(struct)))
            err_msg = "{} IEEE conversion failed with max diff {}. " "Numpy version: {}".format(
                xtal, diff, np.__version__
            )
            converted = orig.convert_to_ieee(struct, refine_rotation=False)
            self.assertArrayAlmostEqual(ieee, converted, err_msg=err_msg, decimal=3)
            converted_refined = orig.convert_to_ieee(struct, refine_rotation=True)
            err_msg = "{} IEEE conversion with refinement failed with max diff {}. " "Numpy version: {}".format(
                xtal, diff, np.__version__
            )
            self.assertArrayAlmostEqual(ieee, converted_refined, err_msg=err_msg, decimal=2)

    def test_structure_transform(self):
        """Tensor transforms consistently between structure settings."""
        # Test trivial case
        trivial = self.fit_r4.structure_transform(self.structure, self.structure.copy())
        self.assertArrayAlmostEqual(trivial, self.fit_r4)
        # Test simple rotation
        rot_symm_op = SymmOp.from_axis_angle_and_translation([1, 1, 1], 55.5)
        rot_struct = self.structure.copy()
        rot_struct.apply_operation(rot_symm_op)
        rot_tensor = self.fit_r4.rotate(rot_symm_op.rotation_matrix)
        trans_tensor = self.fit_r4.structure_transform(self.structure, rot_struct)
        self.assertArrayAlmostEqual(rot_tensor, trans_tensor)
        # Test supercell
        bigcell = self.structure.copy()
        bigcell.make_supercell([2, 2, 3])
        trans_tensor = self.fit_r4.structure_transform(self.structure, bigcell)
        self.assertArrayAlmostEqual(self.fit_r4, trans_tensor)
        # Test rotated primitive to conventional for fcc structure
        sn = self.get_structure("Sn")
        sn_prim = SpacegroupAnalyzer(sn).get_primitive_standard_structure()
        sn_prim.apply_operation(rot_symm_op)
        rotated = self.fit_r4.rotate(rot_symm_op.rotation_matrix)
        transformed = self.fit_r4.structure_transform(sn, sn_prim)
        self.assertArrayAlmostEqual(rotated, transformed)

    def test_from_voigt(self):
        """Voigt matrices of ranks 2-4 round in; malformed shapes raise."""
        with self.assertRaises(ValueError):
            Tensor.from_voigt(
                [
                    [59.33, 28.08, 28.08, 0],
                    [28.08, 59.31, 28.07, 0],
                    [28.08, 28.07, 59.32, 0, 0],
                    [0, 0, 0, 26.35, 0],
                    [0, 0, 0, 0, 26.35],
                ]
            )
        # Rank 4
        Tensor.from_voigt(
            [
                [59.33, 28.08, 28.08, 0, 0, 0],
                [28.08, 59.31, 28.07, 0, 0, 0],
                [28.08, 28.07, 59.32, 0, 0, 0],
                [0, 0, 0, 26.35, 0, 0],
                [0, 0, 0, 0, 26.35, 0],
                [0, 0, 0, 0, 0, 26.35],
            ]
        )
        # Rank 3
        Tensor.from_voigt(np.zeros((3, 6)))
        # Rank 2
        Tensor.from_voigt(np.zeros(6))
        # Addresses occasional cast issues for integers
        Tensor.from_voigt(np.arange(6))

    def test_symmetry_reduce(self):
        """symmetry_reduce collapses equivalent tensors; originals are recoverable."""
        tbs = [Tensor.from_voigt(row) for row in np.eye(6) * 0.01]
        reduced = symmetry_reduce(tbs, self.get_structure("Sn"))
        self.assertEqual(len(reduced), 2)
        self.assertArrayEqual([len(i) for i in reduced.values()], [2, 2])
        reconstructed = []
        for k, v in reduced.items():
            reconstructed.extend([k.voigt] + [k.transform(op).voigt for op in v])
        reconstructed = sorted(reconstructed, key=lambda x: np.argmax(x))
        self.assertArrayAlmostEqual([tb for tb in reconstructed], np.eye(6) * 0.01)

    def test_tensor_mapping(self):
        """TensorMapping supports get/set by nearly-equal tensor keys."""
        # Test get
        tbs = [Tensor.from_voigt(row) for row in np.eye(6) * 0.01]
        reduced = symmetry_reduce(tbs, self.get_structure("Sn"))
        tkey = Tensor.from_values_indices([0.01], [(0, 0)])
        tval = reduced[tkey]
        for tens_1, tens_2 in zip(tval, reduced[tbs[0]]):
            self.assertAlmostEqual(tens_1, tens_2)
        # Test set
        reduced[tkey] = "test_val"
        self.assertEqual(reduced[tkey], "test_val")
        # Test empty initialization
        empty = TensorMapping()
        self.assertEqual(empty._tensor_list, [])

    def test_populate(self):
        """populate fills symmetry-equivalent entries from a sparse seed."""
        test_data = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "test_toec_data.json"))
        sn = self.get_structure("Sn")
        vtens = np.zeros((6, 6))
        vtens[0, 0] = 259.31
        vtens[0, 1] = 160.71
        vtens[3, 3] = 73.48
        et = Tensor.from_voigt(vtens)
        populated = et.populate(sn, prec=1e-3).voigt.round(2)
        self.assertAlmostEqual(populated[1, 1], 259.31)
        self.assertAlmostEqual(populated[2, 2], 259.31)
        self.assertAlmostEqual(populated[0, 2], 160.71)
        self.assertAlmostEqual(populated[1, 2], 160.71)
        self.assertAlmostEqual(populated[4, 4], 73.48)
        self.assertAlmostEqual(populated[5, 5], 73.48)
        # test a rank 6 example
        vtens = np.zeros([6] * 3)
        indices = [(0, 0, 0), (0, 0, 1), (0, 1, 2), (0, 3, 3), (0, 5, 5), (3, 4, 5)]
        values = [-1271.0, -814.0, -50.0, -3.0, -780.0, -95.0]
        for v, idx in zip(values, indices):
            vtens[idx] = v
        toec = Tensor.from_voigt(vtens)
        toec = toec.populate(sn, prec=1e-3, verbose=True)
        self.assertAlmostEqual(toec.voigt[1, 1, 1], -1271)
        self.assertAlmostEqual(toec.voigt[0, 1, 1], -814)
        self.assertAlmostEqual(toec.voigt[0, 2, 2], -814)
        self.assertAlmostEqual(toec.voigt[1, 4, 4], -3)
        self.assertAlmostEqual(toec.voigt[2, 5, 5], -3)
        self.assertAlmostEqual(toec.voigt[1, 2, 0], -50)
        self.assertAlmostEqual(toec.voigt[4, 5, 3], -95)
        et = Tensor.from_voigt(test_data["C3_raw"]).fit_to_structure(sn)
        new = np.zeros(et.voigt.shape)
        for idx in indices:
            new[idx] = et.voigt[idx]
        new = Tensor.from_voigt(new).populate(sn)
        self.assertArrayAlmostEqual(new, et, decimal=2)

    def test_from_values_indices(self):
        """Constructing from sparse (value, index) pairs and populating."""
        sn = self.get_structure("Sn")
        indices = [(0, 0), (0, 1), (3, 3)]
        values = [259.31, 160.71, 73.48]
        et = Tensor.from_values_indices(values, indices, structure=sn, populate=True).voigt.round(4)
        self.assertAlmostEqual(et[1, 1], 259.31)
        self.assertAlmostEqual(et[2, 2], 259.31)
        self.assertAlmostEqual(et[0, 2], 160.71)
        self.assertAlmostEqual(et[1, 2], 160.71)
        self.assertAlmostEqual(et[4, 4], 73.48)
        self.assertAlmostEqual(et[5, 5], 73.48)

    def test_serialization(self):
        """as_dict/from_dict round trips, including the voigt representation."""
        # Test base serialize-deserialize
        d = self.symm_rank2.as_dict()
        new = Tensor.from_dict(d)
        self.assertArrayAlmostEqual(new, self.symm_rank2)
        d = self.symm_rank3.as_dict(voigt=True)
        new = Tensor.from_dict(d)
        self.assertArrayAlmostEqual(new, self.symm_rank3)

    def test_projection_methods(self):
        self.assertAlmostEqual(self.rand_rank2.project([1, 0, 0]), self.rand_rank2[0, 0])
        self.assertAlmostEqual(self.rand_rank2.project([1, 1, 1]), np.sum(self.rand_rank2) / 3)
        # Test integration
        self.assertArrayAlmostEqual(self.ones.average_over_unit_sphere(), 1)

    def test_summary_methods(self):
        self.assertEqual(
            set(self.ones.get_grouped_indices()[0]),
            set(itertools.product(range(3), range(3))),
        )
        self.assertEqual(self.ones.get_grouped_indices(voigt=True)[0], [(i,) for i in range(6)])
        self.assertEqual(self.ones.get_symbol_dict(), {"T_1": 1})
        self.assertEqual(self.ones.get_symbol_dict(voigt=False), {"T_11": 1})

    def test_round(self):
        """round() truncates values and preserves the Tensor subclass."""
        test = self.non_symm + 0.01
        rounded = test.round(1)
        self.assertArrayAlmostEqual(rounded, self.non_symm)
        self.assertTrue(isinstance(rounded, Tensor))
class TensorCollectionTest(PymatgenTest):
    """Tests for TensorCollection's list-based delegation of Tensor methods."""

    def setUp(self):
        """Build sequential, random, and mixed-rank tensor collections."""
        self.seq_tc = [t for t in np.arange(4 * 3 ** 3).reshape((4, 3, 3, 3))]
        self.seq_tc = TensorCollection(self.seq_tc)
        self.rand_tc = TensorCollection([t for t in np.random.random((4, 3, 3))])
        self.diff_rank = TensorCollection([np.ones([3] * i) for i in range(2, 5)])
        self.struct = self.get_structure("Si")
        ieee_file_path = os.path.join(PymatgenTest.TEST_FILES_DIR, "ieee_conversion_data.json")
        self.ieee_data = loadfn(ieee_file_path)

    def list_based_function_check(self, attribute, coll, *args, **kwargs):
        """
        This function allows for more efficient testing of list-based
        functions in a "collection"-style class like TensorCollection.

        It ensures that the collection-level result of ``attribute``
        agrees element-wise with applying the same attribute to each
        member tensor individually.
        """
        tc_orig = TensorCollection(coll)
        tc_mod = getattr(tc_orig, attribute)
        if callable(tc_mod):
            tc_mod = tc_mod(*args, **kwargs)
        for t_orig, t_mod in zip(tc_orig, tc_mod):
            this_mod = getattr(t_orig, attribute)
            if callable(this_mod):
                this_mod = this_mod(*args, **kwargs)
            if isinstance(this_mod, np.ndarray):
                self.assertArrayAlmostEqual(this_mod, t_mod)

    def test_list_based_functions(self):
        """Check that collection-level methods match per-tensor results."""
        # zeroed
        tc = TensorCollection([1e-4 * Tensor(np.eye(3))] * 4)
        for t in tc.zeroed():
            self.assertArrayEqual(t, np.zeros((3, 3)))
        for t in tc.zeroed(1e-5):
            self.assertArrayEqual(t, 1e-4 * np.eye(3))
        self.list_based_function_check("zeroed", tc)
        self.list_based_function_check("zeroed", tc, tol=1e-5)
        # transform
        symm_op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1])
        self.list_based_function_check("transform", self.seq_tc, symm_op=symm_op)
        # symmetrized
        self.list_based_function_check("symmetrized", self.seq_tc)
        # rotation
        a = 3.14 * 42.5 / 180
        rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]])
        self.list_based_function_check("rotate", self.diff_rank, matrix=rotation)
        # is_symmetric
        self.assertFalse(self.seq_tc.is_symmetric())
        self.assertTrue(self.diff_rank.is_symmetric())
        # fit_to_structure (the original ran this pair twice back to back;
        # the duplicate has been removed)
        self.list_based_function_check("fit_to_structure", self.diff_rank, self.struct)
        self.list_based_function_check("fit_to_structure", self.seq_tc, self.struct)
        # voigt
        self.list_based_function_check("voigt", self.diff_rank)
        # is_voigt_symmetric
        self.assertTrue(self.diff_rank.is_voigt_symmetric())
        self.assertFalse(self.seq_tc.is_voigt_symmetric())
        # Convert to ieee (only the first two reference entries, for speed)
        for entry in self.ieee_data[:2]:
            tc = TensorCollection([entry["original_tensor"]] * 3)
            struct = entry["structure"]
            self.list_based_function_check("convert_to_ieee", tc, struct)
        # from_voigt
        tc_input = [t for t in np.random.random((3, 6, 6))]
        tc = TensorCollection.from_voigt(tc_input)
        for t_input, t in zip(tc_input, tc):
            self.assertArrayAlmostEqual(Tensor.from_voigt(t_input), t)

    def test_serialization(self):
        """as_dict/from_dict round trips, plus the voigt-symmetrized path."""
        # Test base serialize-deserialize
        d = self.seq_tc.as_dict()
        new = TensorCollection.from_dict(d)
        for t, t_new in zip(self.seq_tc, new):
            self.assertArrayAlmostEqual(t, t_new)
        # Suppress vsym warnings and test voigt
        with warnings.catch_warnings(record=True):
            vsym = self.rand_tc.voigt_symmetrized
            d = vsym.as_dict(voigt=True)
            new_vsym = TensorCollection.from_dict(d)
            for t, t_new in zip(vsym, new_vsym):
                self.assertArrayAlmostEqual(t, t_new)
class SquareTensorTest(PymatgenTest):
    def setUp(self):
        """Build rank-2 fixtures: random, symmetric, singular, non-symmetric,
        near-identity, and rotation tensors."""
        self.rand_sqtensor = SquareTensor(np.random.randn(3, 3))
        self.symm_sqtensor = SquareTensor([[0.1, 0.3, 0.4], [0.3, 0.5, 0.2], [0.4, 0.2, 0.6]])
        # Rank-deficient matrix (two proportional rows, zero row) — det is 0.
        self.non_invertible = SquareTensor([[0.1, 0, 0], [0.2, 0, 0], [0, 0, 0]])
        self.non_symm = SquareTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.2, 0.5, 0.5]])
        self.low_val = SquareTensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-5]])
        # Close to a rotation (within ~1e-6) but not exactly one.
        self.low_val_2 = SquareTensor([[1e-6, -1 - 1e-6, 1e-6], [1 + 1e-7, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-6]])
        a = 3.14 * 42.5 / 180
        self.rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]])
    def test_new(self):
        """Only square 3x3 rank-2 inputs are accepted by SquareTensor."""
        non_sq_matrix = [
            [0.1, 0.2, 0.1],
            [0.1, 0.2, 0.3],
            [0.1, 0.2, 0.3],
            [0.1, 0.1, 0.1],
        ]
        # Ragged rows are also rejected.
        bad_matrix = [[0.1, 0.2], [0.2, 0.3, 0.4], [0.2, 0.3, 0.5]]
        too_high_rank = np.zeros((3, 3, 3))
        self.assertRaises(ValueError, SquareTensor, non_sq_matrix)
        self.assertRaises(ValueError, SquareTensor, bad_matrix)
        self.assertRaises(ValueError, SquareTensor, too_high_rank)
def test_properties(self):
# transpose
self.assertArrayEqual(
self.non_symm.trans,
SquareTensor([[0.1, 0.4, 0.2], [0.2, 0.5, 0.5], [0.3, 0.6, 0.5]]),
)
self.assertArrayEqual(self.rand_sqtensor.trans, np.transpose(self.rand_sqtensor))
self.assertArrayEqual(self.symm_sqtensor, self.symm_sqtensor.trans)
# inverse
self.assertArrayEqual(self.non_symm.inv, np.linalg.inv(self.non_symm))
with self.assertRaises(ValueError):
self.non_invertible.inv
# determinant
self.assertEqual(self.rand_sqtensor.det, np.linalg.det(self.rand_sqtensor))
self.assertEqual(self.non_invertible.det, 0.0)
self.assertEqual(self.non_symm.det, 0.009)
# symmetrized
self.assertArrayEqual(
self.rand_sqtensor.symmetrized,
0.5 * (self.rand_sqtensor + self.rand_sqtensor.trans),
)
self.assertArrayEqual(self.symm_sqtensor, self.symm_sqtensor.symmetrized)
self.assertArrayAlmostEqual(
self.non_symm.symmetrized,
SquareTensor([[0.1, 0.3, 0.25], [0.3, 0.5, 0.55], [0.25, 0.55, 0.5]]),
)
# invariants
i1 = np.trace(self.rand_sqtensor)
i2 = (
self.rand_sqtensor[0, 0] * self.rand_sqtensor[1, 1]
+ self.rand_sqtensor[1, 1] * self.rand_sqtensor[2, 2]
+ self.rand_sqtensor[2, 2] * self.rand_sqtensor[0, 0]
- self.rand_sqtensor[0, 1] * self.rand_sqtensor[1, 0]
- self.rand_sqtensor[0, 2] * self.rand_sqtensor[2, 0]
- self.rand_sqtensor[2, 1] * self.rand_sqtensor[1, 2]
)
i3 = np.linalg.det(self.rand_sqtensor)
self.assertArrayAlmostEqual([i1, i2, i3], self.rand_sqtensor.principal_invariants)
def test_is_rotation(self):
self.assertTrue(self.rotation.is_rotation())
self.assertFalse(self.symm_sqtensor.is_rotation())
self.assertTrue(self.low_val_2.is_rotation())
self.assertFalse(self.low_val_2.is_rotation(tol=1e-8))
def test_refine_rotation(self):
self.assertArrayAlmostEqual(self.rotation, self.rotation.refine_rotation())
new = self.rotation.copy()
new[2, 2] += 0.02
self.assertFalse(new.is_rotation())
self.assertArrayAlmostEqual(self.rotation, new.refine_rotation())
new[1] *= 1.05
self.assertArrayAlmostEqual(self.rotation, new.refine_rotation())
def test_get_scaled(self):
self.assertArrayEqual(
self.non_symm.get_scaled(10.0),
SquareTensor([[1, 2, 3], [4, 5, 6], [2, 5, 5]]),
)
def test_polar_decomposition(self):
u, p = self.rand_sqtensor.polar_decomposition()
self.assertArrayAlmostEqual(np.dot(u, p), self.rand_sqtensor)
self.assertArrayAlmostEqual(np.eye(3), np.dot(u, np.conjugate(np.transpose(u))))
def test_serialization(self):
# Test base serialize-deserialize
d = self.rand_sqtensor.as_dict()
new = SquareTensor.from_dict(d)
self.assertArrayAlmostEqual(new, self.rand_sqtensor)
self.assertIsInstance(new, SquareTensor)
# Ensure proper object-independent deserialization
obj = MontyDecoder().process_decoded(d)
self.assertIsInstance(obj, SquareTensor)
with warnings.catch_warnings(record=True):
vsym = self.rand_sqtensor.voigt_symmetrized
d_vsym = vsym.as_dict(voigt=True)
new_voigt = Tensor.from_dict(d_vsym)
self.assertArrayAlmostEqual(vsym, new_voigt)
if __name__ == "__main__":
unittest.main()
|
|
"""15th Night Flask App."""
from email_client import send_email, verify_email
from flask import (
Flask, render_template, redirect, url_for, request, session, flash
)
from flask.ext.login import (
login_user, current_user, login_required, LoginManager
)
from twilio_client import send_sms
from werkzeug.exceptions import HTTPException
from app import database
from app.database import db_session
from app.forms import RegisterForm, LoginForm, AlertForm, ResponseForm, DeleteUserForm
from app.models import User, Alert, Response
from app.email_client import send_email
try:
from config import HOST_NAME
except:
from configdist import HOST_NAME
flaskapp = Flask(__name__)
try:
flaskapp.config.from_object('config')
except:
flaskapp.config.from_object('configdist')
flaskapp.secret_key = flaskapp.config['SECRET_KEY']
login_manager = LoginManager()
login_manager.init_app(flaskapp)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: fetch a User by primary key.

    The parameter was renamed from ``id`` to avoid shadowing the builtin;
    Flask-Login invokes this callback positionally, so callers are unaffected.
    """
    return User.query.get(int(user_id))
@flaskapp.teardown_appcontext
def shutdown_session(exception):
    """Remove the scoped database session at the end of each app context.

    Flask passes the unhandled exception (or None) to teardown callbacks —
    the previous parameter name ``response`` was misleading, as teardown
    functions never receive a response object.
    """
    database.db_session.remove()
@flaskapp.errorhandler(404)
@flaskapp.errorhandler(Exception)
def error_page(error):
    """Render the generic error page for 404s and unhandled exceptions."""
    # HTTPExceptions carry their own status code; anything else is a 500.
    status_code = error.code if isinstance(error, HTTPException) else 500
    print(error)
    return render_template("error.html", error_code=status_code), status_code
@flaskapp.route('/')
def index():
    """Send authenticated users to their dashboard, everyone else home."""
    if not current_user.is_authenticated:
        return render_template('home.html')
    return redirect(url_for('dashboard'))
@flaskapp.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form and authenticate submitted credentials."""
    # Already signed in? Nothing to do here.
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    form = LoginForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            # Emails are stored lowercase; normalize before lookup.
            account = User.get_by_email(request.form['email'].lower())
            passwd = request.form.get("password")
            if account is not None and account.check_password(passwd):
                # Mark the browser session and hand off to Flask-Login.
                session['logged_in'] = True
                login_user(account)
                flash('Logged in successfully.', 'success')
                return redirect(request.args.get('next') or url_for('dashboard'))
            flash('Invalid Credentials. Please try again.', 'danger')
    return render_template('login.html', form=form)
@flaskapp.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
    """Dispatch to the dashboard view for the current user's role."""
    if current_user.role == 'admin':
        return _admin_dashboard()
    if current_user.role == 'advocate':
        return _advocate_dashboard()
    return _provider_dashboard()


def _admin_dashboard():
    """Admin view: register new users and list users/alerts."""
    form = RegisterForm()
    form_error = False
    # One-shot flag set by delete_user() on the previous request.
    deleted_user = session.pop('deleted_user', False)
    if request.method == 'POST':
        # validate_on_submit() is called once (the original called it twice).
        if form.validate_on_submit():
            user = User(
                email=form.email.data,
                password=form.password.data,
                phone_number=form.phone_number.data,
                other=form.other.data,
                shelter=form.shelter.data,
                food=form.food.data,
                clothes=form.clothes.data,
                role=form.role.data
            )
            user.save()
            verify_email(user.email)
            # Typo fix: message previously read "succesfully".
            flash('User registered successfully', 'success')
            return redirect(url_for('dashboard'))
        form_error = True
    return render_template('dashboard/admin.html',
                           form=form,
                           form_error=form_error,
                           users=User.get_users(),
                           alerts=Alert.get_alerts(),
                           delete_user_form=DeleteUserForm(),
                           deleted_user=deleted_user)


def _advocate_dashboard():
    """Advocate view: create an alert and notify matching providers."""
    form = AlertForm()
    if request.method == 'POST' and form.validate_on_submit():
        alert = Alert(
            description=form.description.data,
            other=form.other.data,
            shelter=form.shelter.data,
            food=form.food.data,
            clothes=form.clothes.data,
            gender=form.gender.data,
            age=form.age.data,
            user=current_user
        )
        alert.save()
        # Notify every provider whose capabilities match the alert's needs.
        users_to_notify = User.get_provider(alert.food, alert.clothes, alert.shelter, alert.other)
        for user in users_to_notify:
            print("found user to notify {}".format(user))
            body = "There is a new 15th night alert. Go to " + \
                HOST_NAME + \
                "/respond_to/" + \
                str(alert.id) + " to respond."
            send_sms(to_number=user.phone_number, body=body)
            send_email(user.email, '15th Night Alert', body)
        flash('Alert sent successfully', 'success')
        return redirect(url_for('dashboard'))
    return render_template('dashboard/advocate.html', form=form)


def _provider_dashboard():
    """Provider view: list the active alerts relevant to this provider."""
    return render_template(
        'dashboard/provider.html',
        user=current_user,
        alerts=Alert.get_active_alerts_for_provider(current_user)
    )
@flaskapp.route('/delete_user', methods=['POST'])
@login_required
def delete_user():
    """Delete a user (admin only) and flag the outcome for the dashboard."""
    if current_user.role != 'admin':
        flash('Access denied', 'danger')
        return redirect(url_for('dashboard'))
    form = DeleteUserForm()
    if form.validate_on_submit():
        user = User.get(form.id.data)
        if user is None:
            # Guard: an unknown id previously produced AttributeError
            # (User.get presumably returns None for a miss — confirm),
            # which surfaced as a 500 via the generic error handler.
            flash('Failed to delete user', 'danger')
        else:
            user.delete()
            flash('User Deleted Successfully', 'success')
    else:
        flash('Failed to delete user', 'danger')
    # Set on every outcome (matches original behavior); the admin
    # dashboard pops this flag on its next render.
    session['deleted_user'] = True
    return redirect(url_for('dashboard'))
@flaskapp.route("/logout")
@login_required
def logout():
"""User logout."""
session.clear()
flash('You have been logged out!', 'success')
return redirect(url_for('index'))
@flaskapp.route('/health')
def healthcheck():
    """Low overhead health check."""
    # No DB or template work — cheap enough for frequent probes.
    return 'ok', 200
@flaskapp.route('/about')
def about():
    """Simple about page route (static template, no form handling)."""
    return render_template('about.html')
@flaskapp.route('/contact', methods=['GET', 'POST'])
def contact():
    """Contact form: email the submitted message, then return to login."""
    if request.method == 'POST':
        # Removed leftover debug flash ('you tried to make a post').
        name = request.form['name']
        email = request.form['email']
        message = request.form['message']
        # NOTE(review): the mail is sent TO the submitter's own address and
        # the submitted name is collected but unused — confirm the intended
        # recipient and whether the name should be included in the body.
        send_email(to=email, subject="Contact Form", body=message)
        return redirect(url_for('login'))
    return render_template('contact.html')
@flaskapp.route('/respond_to/<int:alert_id>', methods=['GET','POST'])
@login_required
def response_submitted(alert_id):
    """
    Show an alert's response form (GET) or record a response (POST).

    On POST, text and email the alert's creator with the responder's
    contact details, what they can help with, and their message.
    """
    # Alert.query.get() returns None for a missing id — it does not raise —
    # so the previous try/except-based 404 path was dead code and a missing
    # alert crashed with AttributeError. Check explicitly instead, once,
    # for both the GET and POST branches.
    alert = Alert.query.get(int(alert_id))
    if alert is None:
        return 'Error: alert {} not found'.format(alert_id), 404
    if request.method != 'POST':
        return render_template('respond_to.html', alert=alert,
                               user=current_user, form=ResponseForm())
    submitted_message = request.form['message']
    responding_user = current_user
    user_to_message = alert.user
    # Build the notification: responder contact info, capabilities, message.
    response_message = "%s" % responding_user.email
    if responding_user.phone_number:
        response_message += ", %s" % responding_user.phone_number
    # Typo fix: message previously read "availble".
    response_message += " is available for: "
    available = {
        "shelter": responding_user.shelter,
        "clothes": responding_user.clothes,
        "food": responding_user.food,
        "other": responding_user.other,
    }
    response_message += "%s" % ", ".join(k for k, v in available.items() if v)
    response_message += " Message: " + submitted_message
    # SMS only if the advocate has a phone number; email always.
    if user_to_message.phone_number:
        send_sms(
            user_to_message.phone_number,
            response_message
        )
    send_email(
        to=user_to_message.email,
        subject="Alert Response",
        body=response_message,
    )
    Response(user=current_user, alert=alert, message=submitted_message).save()
    flash('Your response has been sent to the advocate, thank you!', 'success')
    return redirect(url_for('dashboard'))
if __name__ == '__main__':
    # Development entry point only; debug=True must not be used in production.
    flaskapp.run(debug=True)
|
|
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This template creates a single project with the specified service
accounts and APIs enabled.
"""
import copy
def generate_config(context):
    """ Entry point for the deployment resources.

    Builds the project, its billing link, API activations, service
    accounts, usage-export bucket and shared-VPC wiring, and returns
    the resource list plus the deployment outputs.
    """
    # Fall back to the deployment name when explicit ids are not given.
    project_id = context.properties.get('projectId', context.env['name'])
    project_name = context.properties.get('name', context.env['name'])
    # Ensure that the parent ID is a string.
    context.properties['parent']['id'] = str(context.properties['parent']['id'])
    resources = [
        {
            'name': 'project',
            'type': 'cloudresourcemanager.v1.project',
            'properties':
                {
                    'name': project_name,
                    'projectId': project_id,
                    'parent': context.properties['parent']
                }
        },
        {
            # Link billing immediately; API activation depends on this.
            'name': 'billing',
            'type': 'deploymentmanager.v2.virtual.projectBillingInfo',
            'properties':
                {
                    'name':
                        'projects/$(ref.project.projectId)',
                    'billingAccountName':
                        'billingAccounts/' +
                        context.properties['billingAccountId']
                }
        }
    ]
    api_resources, api_names_list = activate_apis(context.properties)
    resources.extend(api_resources)
    resources.extend(create_service_accounts(context, project_id))
    resources.extend(create_bucket(context.properties))
    resources.extend(create_shared_vpc(project_id, context.properties))
    # Cleanup of the auto-created default network/SA is on by default.
    if context.properties.get('removeDefaultVPC', True):
        resources.extend(delete_default_network(api_names_list))
    if context.properties.get('removeDefaultSA', True):
        resources.extend(delete_default_service_account(api_names_list))
    return {
        'resources':
            resources,
        'outputs':
            [
                {
                    'name': 'projectId',
                    'value': '$(ref.project.projectId)'
                },
                {
                    'name': 'usageExportBucketName',
                    'value': '$(ref.project.projectId)-usage-export'
                },
                {
                    'name':
                        'serviceAccountDisplayName',
                    'value':
                        '$(ref.project.projectNumber)@cloudservices.gserviceaccount.com'  # pylint: disable=line-too-long
                },
                {
                    'name':
                        'resources',
                    'value':
                        [resource['name'] for resource in resources]
                }
            ]
    }
def activate_apis(properties):
    """ Resources for API activation.

    Returns (resources, api_names_list): the enableService resources and
    the list of resource names (including 'billing') other resources can
    depend on to ensure the APIs are enabled first.
    """
    concurrent_api_activation = properties.get('concurrentApiActivation')
    # Copy so the caller's property list is never mutated in place.
    apis = list(properties.get('activateApis', []))
    # Enable the storage-component API if the usage export bucket is enabled.
    if (
            properties.get('usageExportBucket') and
            'storage-component.googleapis.com' not in apis
    ):
        apis.append('storage-component.googleapis.com')
    resources = []
    api_names_list = ['billing']
    # Bug fix: iterate the augmented `apis` list. The original re-read
    # properties.get('activateApis', []), which silently dropped the forced
    # storage-component API whenever 'activateApis' was absent.
    for api in apis:
        depends_on = ['billing']
        # Serialize activation of all APIs by making apis[n]
        # depend on apis[n-1].
        if resources and not concurrent_api_activation:
            depends_on.append(resources[-1]['name'])
        api_name = 'api-' + api
        api_names_list.append(api_name)
        resources.append(
            {
                'name': api_name,
                'type': 'deploymentmanager.v2.virtual.enableService',
                'metadata': {
                    'dependsOn': depends_on
                },
                'properties':
                    {
                        'consumerId': 'project:' + '$(ref.project.projectId)',
                        'serviceName': api
                    }
            }
        )
    # Return the API resources to enable other resources to use them as
    # dependencies, to ensure that they are created first. For example,
    # the default VPC or service account.
    return resources, api_names_list
def create_project_iam(dependencies, role_member_list):
    """ Grant the shared project IAM permissions. """
    # Read-modify-write: fetch the current policy first so that existing
    # bindings are preserved, then patch the new bindings in.
    get_policy = {
        'name': 'get-iam-policy',
        'action': 'gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.getIamPolicy',  # pylint: disable=line-too-long
        'properties': {'resource': '$(ref.project.projectId)'},
        'metadata': {
            'dependsOn': dependencies,
            # Re-fetch on every deployment update, not only on create.
            'runtimePolicy': ['UPDATE_ALWAYS']
        }
    }
    patch_policy = {
        'name': 'patch-iam-policy',
        'action': 'gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.setIamPolicy',  # pylint: disable=line-too-long
        'properties': {
            'resource': '$(ref.project.projectId)',
            'policy': '$(ref.get-iam-policy)',
            'gcpIamPolicyPatch': {'add': role_member_list}
        }
    }
    return [get_policy, patch_policy]
def create_shared_vpc_subnet_iam(context, dependencies, members_list):
    """ Grant the shared VPC subnet IAM permissions to Service Accounts. """
    subnets = context.properties.get('sharedVPCSubnets')
    shared_vpc = context.properties.get('sharedVPC')
    # Nothing to grant unless both a host project and subnets are given.
    if not (subnets and shared_vpc):
        return []
    resources = []
    # Until subnetwork IAM patch support exists, setIamPolicy overwrites
    # any existing policy on the subnet, so all members go in one binding.
    for index, subnet in enumerate(subnets, 1):
        resources.append(
            {
                'name': 'add-vpc-subnet-iam-policy-{}'.format(index),
                'type': 'gcp-types/compute-beta:compute.subnetworks.setIamPolicy',  # pylint: disable=line-too-long
                'metadata': {
                    'dependsOn': dependencies,
                },
                'properties': {
                    'name': subnet['subnetId'],
                    'project': shared_vpc,
                    'region': subnet['region'],
                    'bindings': [
                        {
                            'role': 'roles/compute.networkUser',
                            'members': members_list
                        }
                    ]
                }
            }
        )
    return resources
def create_service_accounts(context, project_id):
    """ Create Service Accounts and grant project IAM permissions. """
    resources = []
    # Members needing shared-VPC network access; the Google APIs service
    # account is always included.
    network_list = ['serviceAccount:$(ref.project.projectNumber)@cloudservices.gserviceaccount.com']  # pylint: disable=line-too-long
    service_account_dep = []
    policies_to_add = []
    for sa_spec in context.properties['serviceAccounts']:
        account_id = sa_spec['accountId']
        member = 'serviceAccount:{}@{}.iam.gserviceaccount.com'.format(
            account_id,
            project_id
        )
        # Flag this SA for shared VPC subnet IAM grants below.
        if sa_spec.get('networkAccess'):
            network_list.append(member)
        # Project-level role bindings for this service account.
        policies_to_add.extend(
            {'role': role, 'members': [member]} for role in sa_spec['roles']
        )
        # The SA resource name doubles as a dependency for IAM grants.
        resource_name = 'service-account-' + account_id
        service_account_dep.append(resource_name)
        resources.append(
            {
                'name': resource_name,
                'type': 'iam.v1.serviceAccount',
                'properties': {
                    'accountId': account_id,
                    'displayName': sa_spec.get('displayName', account_id),
                    'projectId': '$(ref.project.projectId)'
                }
            }
        )
    # Group bindings for the project IAM policy.
    for group in context.properties['groups']:
        group_member = 'group:{}'.format(group['name'])
        policies_to_add.extend(
            {'role': role, 'members': [group_member]} for role in group['roles']
        )
    if policies_to_add:
        resources.extend(
            create_project_iam(service_account_dep, policies_to_add)
        )
    # Shared VPC subnet grants apply only to service projects, not hosts.
    if not context.properties.get('sharedVPCHost'):
        resources.extend(
            create_shared_vpc_subnet_iam(
                context,
                service_account_dep,
                network_list
            )
        )
    return resources
def create_bucket(properties):
    """ Resources for the usage export bucket. """
    if not properties.get('usageExportBucket'):
        return []
    bucket_name = '$(ref.project.projectId)-usage-export'
    # The bucket itself; storage-component is force-enabled by
    # activate_apis() whenever the export bucket is requested.
    bucket = {
        'name': 'create-usage-export-bucket',
        'type': 'gcp-types/storage-v1:buckets',
        'properties': {
            'project': '$(ref.project.projectId)',
            'name': bucket_name
        },
        'metadata': {
            'dependsOn': ['api-storage-component.googleapis.com']
        }
    }
    # Point the project's compute usage export at that bucket.
    set_export = {
        'name': 'set-usage-export-bucket',
        'action': 'gcp-types/compute-v1:compute.projects.setUsageExportBucket',  # pylint: disable=line-too-long
        'properties': {
            'project': '$(ref.project.projectId)',
            'bucketName': 'gs://' + bucket_name
        },
        'metadata': {
            'dependsOn': ['create-usage-export-bucket']
        }
    }
    return [bucket, set_export]
def create_shared_vpc(project_id, properties):
    """ Configure the project Shared VPC properties. """
    resources = []
    shared_vpc = properties.get('sharedVPC')
    if shared_vpc:
        # Attach this project to the named shared VPC as a service project.
        resources.append(
            {
                'name': project_id + '-attach-xpn-service-' + shared_vpc,
                'type': 'compute.beta.xpnResource',
                'metadata': {
                    'dependsOn': ['api-compute.googleapis.com']
                },
                'properties': {
                    'project': shared_vpc,
                    'xpnResource': {
                        'id': '$(ref.project.projectId)',
                        'type': 'PROJECT',
                    }
                }
            }
        )
    elif properties.get('sharedVPCHost'):
        # Mark this project itself as a shared VPC host.
        resources.append(
            {
                'name': project_id + '-xpn-host',
                'type': 'compute.beta.xpnHost',
                'metadata': {
                    'dependsOn': ['api-compute.googleapis.com']
                },
                'properties': {
                    'project': '$(ref.project.projectId)'
                }
            }
        )
    return resources
def delete_default_network(api_names_list):
    """ Delete the default network. """
    # The four auto-created firewall rules; deletion resource names are the
    # rule names prefixed with 'delete-'.
    rule_names = [
        'delete-default-allow-icmp',
        'delete-default-allow-internal',
        'delete-default-allow-rdp',
        'delete-default-allow-ssh',
    ]
    resources = [
        {
            'name': rule_name,
            'action': 'gcp-types/compute-beta:compute.firewalls.delete',
            'metadata': {
                'dependsOn': api_names_list
            },
            'properties': {
                # Strip the 'delete-' prefix to recover the firewall name.
                'firewall': rule_name[len('delete-'):],
                'project': '$(ref.project.projectId)',
            }
        }
        for rule_name in rule_names
    ]
    # Ensure the firewall rules are removed before deleting the VPC.
    network_dependency = list(api_names_list) + rule_names
    resources.append(
        {
            'name': 'delete-default-network',
            'action': 'gcp-types/compute-beta:compute.networks.delete',
            'metadata': {
                'dependsOn': network_dependency
            },
            'properties': {
                'network': 'default',
                'project': '$(ref.project.projectId)'
            }
        }
    )
    return resources
def delete_default_service_account(api_names_list):
    """ Delete the default service account. """
    default_sa = (
        'projects/$(ref.project.projectId)/serviceAccounts/'
        '$(ref.project.projectNumber)-compute@developer.gserviceaccount.com'
    )
    return [
        {
            'name': 'delete-default-sa',
            'action': 'gcp-types/iam-v1:iam.projects.serviceAccounts.delete',
            'metadata': {
                'dependsOn': api_names_list,
                # Run only when the deployment is first created.
                'runtimePolicy': ['CREATE']
            },
            'properties': {
                'name': default_sa
            }
        }
    ]
|
|
import json
from rest_framework import status
from api.models import Launch
from api.tests.test__base import SLNAPITests
class LaunchTests(SLNAPITests):
def test_v300_upcoming_normal(self):
    """3.0.0 upcoming, default mode: pagination, DB match, net fields present."""
    path = '/3.0.0/launch/upcoming/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches -> a next page, no previous.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    # The serialized launch must match its database record.
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertIsNotNone(data['results'][0]['isonet'])
    self.assertIsNotNone(data['results'][0]['netstamp'])
    self.check_permissions(path)
def test_v300_upcoming_list(self):
    """3.0.0 upcoming, list mode: net timestamp fields must be omitted."""
    path = '/3.0.0/launch/upcoming/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # List mode strips the timestamp fields present in normal mode.
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('netstamp', data['results'][0])
    self.check_permissions(path)
def test_v300_upcoming_detailed(self):
    """3.0.0 upcoming, detailed mode: expanded LSP data when present."""
    path = '/3.0.0/launch/upcoming/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # Detailed mode expands the LSP object when one is attached.
    if data['results'][0]['lsp']:
        self.assertIn('founding_year', data['results'][0]['lsp'])
    self.check_permissions(path)
def test_v300_previous_normal(self):
    """3.0.0 previous, default mode: pagination and net fields present."""
    path = '/3.0.0/launch/previous/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    self.assertIsNotNone(data['results'][0]['isonet'])
    self.assertIsNotNone(data['results'][0]['netstamp'])
    self.check_permissions(path)
def test_v300_previous_list(self):
    """3.0.0 previous, list mode: net timestamp fields must be omitted."""
    path = '/3.0.0/launch/previous/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('netstamp', data['results'][0])
    self.check_permissions(path)
def test_v300_previous_detailed(self):
    """3.0.0 previous, detailed mode: LSP data is expanded."""
    path = '/3.0.0/launch/previous/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    self.assertIn('founding_year', data['results'][0]['lsp'])
    self.check_permissions(path)
def test_v310_upcoming_normal(self):
    """3.1.0 upcoming, default mode: net fields dropped, status expanded."""
    path = '/3.1.0/launch/upcoming/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 3.1.0 removes the raw timestamp fields and nests a status object.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.check_permissions(path)
def test_v310_upcoming_list(self):
    """3.1.0 upcoming, list mode: net fields dropped, status expanded."""
    path = '/3.1.0/launch/upcoming/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.check_permissions(path)
def test_v310_upcoming_detailed(self):
    """3.1.0 upcoming, detailed mode: LSP expanded when present."""
    path = '/3.1.0/launch/upcoming/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    # Detailed mode expands the LSP object when one is attached.
    if data['results'][0]['lsp']:
        self.assertIn('founding_year', data['results'][0]['lsp'])
    self.check_permissions(path)
def test_v310_previous_normal(self):
    """3.1.0 previous, default mode: net fields dropped, status expanded."""
    path = '/3.1.0/launch/previous/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.check_permissions(path)
def test_v310_previous_list(self):
    """3.1.0 previous, list mode: net fields dropped, status expanded."""
    path = '/3.1.0/launch/previous/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.check_permissions(path)
def test_v310_previous_detailed(self):
    """3.1.0 previous, detailed mode: LSP and status expanded."""
    path = '/3.1.0/launch/previous/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('founding_year', data['results'][0]['lsp'])
    self.assertIn('name', data['results'][0]['status'])
    self.check_permissions(path)
def test_v320_upcoming_normal(self):
    """3.2.0 upcoming, default mode: nested rocket/pad structures present."""
    path = '/3.2.0/launch/upcoming/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    # 3.2.0 introduces nested rocket and pad objects.
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('first_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v320_upcoming_list(self):
    """3.2.0 upcoming, list mode: flat summary fields, no rocket object."""
    path = '/3.2.0/launch/upcoming/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    # Copy-paste duplicate of this assertion removed.
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    # List mode keeps summary fields but omits the nested rocket object.
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v320_upcoming_detailed(self):
    """Ensure the v3.2.0 upcoming launch endpoint works in 'detailed' mode."""
    path = '/3.2.0/launch/upcoming/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # v3.2.0 dropped the legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('first_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v320_previous_normal(self):
    """Ensure the v3.2.0 previous launch endpoint works in normal mode."""
    path = '/3.2.0/launch/previous/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # v3.2.0 dropped the legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('first_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v320_previous_list(self):
    """Ensure the v3.2.0 previous launch endpoint works in 'list' mode."""
    path = '/3.2.0/launch/previous/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 'list' mode is the slimmest serializer: no rocket object and no
    # legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v320_previous_detailed(self):
    """Ensure the v3.2.0 previous launch endpoint works in 'detailed' mode."""
    path = '/3.2.0/launch/previous/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['id'])
    self.assertEqual(data['results'][0]['id'], launch.launch_library_id)
    self.assertEqual(data['results'][0]['name'], launch.name)
    # v3.2.0 dropped the legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('first_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v300_launch_with_landings(self):
    """Ensure the v3.0.0 launch detail endpoint serializes launch 864."""
    path = '/3.0.0/launch/864/'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # A detail response is a single object, never a paginated envelope.
    for pagination_key in ('next', 'result', 'previous', 'count'):
        self.assertNotIn(pagination_key, data)
    launch = Launch.objects.get(launch_library_id=data['id'])
    self.assertEqual(data['id'], launch.launch_library_id)
    self.assertEqual(data['name'], launch.name)
    self.assertEqual(data['status'], launch.status.id)
    # v3.0.0 still exposes the legacy timestamp/ISO fields.
    for timestamp_key in ('netstamp', 'wsstamp', 'westamp', 'net',
                          'window_end', 'window_start', 'isonet',
                          'isostart', 'isoend'):
        self.assertIn(timestamp_key, data)
    self.assertEqual(data['launcher']['id'], launch.rocket.configuration.launch_library_id)
    self.assertEqual(data['mission']['id'], launch.mission.launch_library_id)
    if launch.rocket.configuration.manufacturer:
        self.assertEqual(data['lsp']['id'], launch.rocket.configuration.manufacturer.id)
    self.assertEqual(data['location']['id'], launch.pad.location.launch_library_id)
    self.assertEqual(data['pad']['id'], launch.pad.launch_library_id)
def test_v320_launch_with_landings(self):
    """Ensure the v3.2.0 launch detail endpoint serializes launch 864,
    including its first stage(s) and landing data."""
    launch = Launch.objects.get(launch_library_id=864)
    path = '/3.2.0/launch/864/'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # A detail response is a single object, never a paginated envelope.
    self.assertNotIn('next', data)
    self.assertNotIn('result', data)
    self.assertNotIn('previous', data)
    self.assertNotIn('count', data)
    self.assertEqual(data['id'], launch.launch_library_id)
    self.assertEqual(data['name'], launch.name)
    self.assertIn('slug', data)
    self.assertEqual(data['status']['id'], launch.status.id)
    # v3.2.0 dropped the legacy timestamp/ISO fields for ISO 8601 datetimes.
    self.assertNotIn('netstamp', data)
    self.assertNotIn('wsstamp', data)
    self.assertNotIn('westamp', data)
    self.assertIn('net', data)
    self.assertIn('window_end', data)
    self.assertIn('window_start', data)
    self.assertNotIn('isonet', data)
    self.assertNotIn('isostart', data)
    self.assertNotIn('isoend', data)
    self.assertEqual(data['rocket']['configuration']['id'], launch.rocket.configuration.launch_library_id)
    self.assertEqual(data['rocket']['configuration']['launch_service_provider'], launch.rocket.configuration.manufacturer.name)
    # ``firststage`` looks like a related manager (``.count()`` is used on
    # it); a manager is not indexable, so materialize the queryset once.
    first_stages = launch.rocket.firststage.all()
    self.assertEqual(len(data['rocket']['first_stage']), first_stages.count())
    for index, stage_data in enumerate(data['rocket']['first_stage']):
        stage = first_stages[index]
        self.assertEqual(stage_data['type'], stage.type.name)
        self.assertEqual(stage_data['reused'], stage.reused)
        self.assertEqual(stage_data['launcher_flight_number'], stage.launcher_flight_number)
        self.assertEqual(stage_data['launcher']['id'], stage.launcher.id)
        self.assertEqual(stage_data['launcher']['serial_number'], stage.launcher.serial_number)
        self.assertEqual(stage_data['landing']['attempt'], stage.landing.attempt)
        self.assertEqual(stage_data['landing']['success'], stage.landing.success)
        self.assertEqual(stage_data['landing']['description'], stage.landing.description)
        self.assertEqual(stage_data['landing']['location']['name'], stage.landing.location.name)
        self.assertEqual(stage_data['landing']['type']['name'], stage.landing.type.name)
    self.assertEqual(data['mission']['id'], launch.mission.launch_library_id)
    self.assertEqual(data['pad']['id'], launch.pad.launch_library_id)
    self.assertEqual(data['pad']['location']['id'], launch.pad.location.launch_library_id)
def test_v330_upcoming_normal(self):
    """Ensure the v3.3.0 upcoming launch endpoint works in normal mode."""
    path = '/api/3.3.0/launch/upcoming/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    # v3.3.0 switched the primary key to a UUID; the Launch Library id is
    # exposed separately as 'launch_library_id'.
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v330_upcoming_list(self):
    """Ensure the v3.3.0 upcoming launch endpoint works in 'list' mode."""
    path = '/api/3.3.0/launch/upcoming/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 'list' mode is the slimmest serializer: no rocket object and no
    # legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v330_upcoming_detailed(self):
    """Ensure the v3.3.0 upcoming launch endpoint works in 'detailed' mode."""
    path = '/api/3.3.0/launch/upcoming/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v330_previous_normal(self):
    """Ensure the v3.3.0 previous launch endpoint works in normal mode."""
    path = '/api/3.3.0/launch/previous/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v330_previous_list(self):
    """Ensure the v3.3.0 previous launch endpoint works in 'list' mode."""
    path = '/api/3.3.0/launch/previous/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 'list' mode is the slimmest serializer: no rocket object and no
    # legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v330_previous_detailed(self):
    """Ensure the v3.3.0 previous launch endpoint works in 'detailed' mode."""
    path = '/api/3.3.0/launch/previous/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v330_launch_with_landings(self):
    """Ensure the v3.3.0 launch detail endpoint serializes launch 864,
    including its launcher stage(s) and landing data."""
    launch = Launch.objects.get(launch_library_id=864)
    path = '/api/3.3.0/launch/%s/' % launch.id
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # A detail response is a single object, never a paginated envelope.
    self.assertNotIn('next', data)
    self.assertNotIn('result', data)
    self.assertNotIn('previous', data)
    self.assertNotIn('count', data)
    self.assertEqual(data['id'], str(launch.id))
    self.assertEqual(data['name'], launch.name)
    self.assertIn('slug', data)
    self.assertEqual(data['status']['id'], launch.status.id)
    # The legacy timestamp/ISO fields are gone in favor of ISO datetimes.
    self.assertNotIn('netstamp', data)
    self.assertNotIn('wsstamp', data)
    self.assertNotIn('westamp', data)
    self.assertIn('net', data)
    self.assertIn('window_end', data)
    self.assertIn('window_start', data)
    self.assertNotIn('isonet', data)
    self.assertNotIn('isostart', data)
    self.assertNotIn('isoend', data)
    self.assertEqual(data['rocket']['configuration']['id'], launch.rocket.configuration.id)
    self.assertEqual(data['rocket']['configuration']['launch_service_provider']['name'], launch.rocket.configuration.manufacturer.name)
    # ``firststage`` looks like a related manager (``.count()`` is used on
    # it); a manager is not indexable, so materialize the queryset once.
    first_stages = launch.rocket.firststage.all()
    self.assertEqual(len(data['rocket']['launcher_stage']), first_stages.count())
    for index, stage_data in enumerate(data['rocket']['launcher_stage']):
        stage = first_stages[index]
        self.assertEqual(stage_data['type'], stage.type.name)
        self.assertEqual(stage_data['reused'], stage.reused)
        self.assertEqual(stage_data['launcher_flight_number'], stage.launcher_flight_number)
        self.assertEqual(stage_data['launcher']['id'], stage.launcher.id)
        self.assertEqual(stage_data['launcher']['serial_number'], stage.launcher.serial_number)
        self.assertEqual(stage_data['landing']['attempt'], stage.landing.attempt)
        self.assertEqual(stage_data['landing']['success'], stage.landing.success)
        self.assertEqual(stage_data['landing']['description'], stage.landing.description)
        self.assertEqual(stage_data['landing']['location']['name'], stage.landing.location.name)
        self.assertEqual(stage_data['landing']['type']['name'], stage.landing.type.name)
    self.assertEqual(data['mission']['id'], launch.mission.id)
    self.assertEqual(data['pad']['id'], launch.pad.id)
    self.assertEqual(data['pad']['location']['id'], launch.pad.location.id)
def test_v340_upcoming_normal(self):
    """Ensure the v3.4.0 upcoming launch endpoint works in normal mode."""
    path = '/api/3.4.0/launch/upcoming/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    # v3.4.0 renamed the provider fields: 'manufacturer' on the rocket
    # configuration and a top-level 'launch_service_provider'.
    self.assertIn('manufacturer', data['results'][0]['rocket']['configuration'])
    self.assertIn('launch_service_provider', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v340_upcoming_list(self):
    """Ensure the v3.4.0 upcoming launch endpoint works in 'list' mode."""
    path = '/api/3.4.0/launch/upcoming/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 'list' mode is the slimmest serializer: no rocket object and no
    # legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v340_upcoming_detailed(self):
    """Ensure the v3.4.0 upcoming launch endpoint works in 'detailed' mode."""
    path = '/api/3.4.0/launch/upcoming/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('manufacturer', data['results'][0]['rocket']['configuration'])
    self.assertIn('launch_service_provider', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v340_previous_normal(self):
    """Ensure the v3.4.0 previous launch endpoint works in normal mode."""
    path = '/api/3.4.0/launch/previous/?limit=1'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('manufacturer', data['results'][0]['rocket']['configuration'])
    self.assertIn('launch_service_provider', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v340_previous_list(self):
    """Ensure the v3.4.0 previous launch endpoint works in 'list' mode."""
    path = '/api/3.4.0/launch/previous/?limit=1&mode=list'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    # 'list' mode is the slimmest serializer: no rocket object and no
    # legacy timestamp fields.
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertNotIn('rocket', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('name', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('landing', data['results'][0])
    self.assertIn('orbit', data['results'][0])
    self.check_permissions(path)
def test_v340_previous_detailed(self):
    """Ensure the v3.4.0 previous launch endpoint works in 'detailed' mode."""
    path = '/api/3.4.0/launch/previous/?limit=1&mode=detailed'
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # limit=1 against two fixture launches: a next page, no previous page.
    self.assertIsNotNone(data['next'])
    self.assertIsNone(data['previous'])
    self.assertEqual(data['count'], 2)
    launch = Launch.objects.get(launch_library_id=data['results'][0]['launch_library_id'])
    self.assertEqual(data['results'][0]['id'], str(launch.id))
    self.assertEqual(data['results'][0]['name'], launch.name)
    self.assertNotIn('netstamp', data['results'][0])
    self.assertNotIn('isonet', data['results'][0])
    self.assertIn('name', data['results'][0]['status'])
    self.assertIn('rocket', data['results'][0])
    self.assertIn('net', data['results'][0])
    self.assertIn('configuration', data['results'][0]['rocket'])
    # 'detailed' mode additionally expands the rocket configuration.
    self.assertIn('diameter', data['results'][0]['rocket']['configuration'])
    self.assertIn('launcher_stage', data['results'][0]['rocket'])
    self.assertIn('manufacturer', data['results'][0]['rocket']['configuration'])
    self.assertIn('launch_service_provider', data['results'][0])
    self.assertIn('pad', data['results'][0])
    self.assertIn('location', data['results'][0]['pad'])
    self.check_permissions(path)
def test_v340_launch_with_landings(self):
    """Ensure the v3.4.0 launch detail endpoint serializes launch 864,
    including its launcher stage(s) and landing data."""
    launch = Launch.objects.get(launch_library_id=864)
    path = '/api/3.4.0/launch/%s/' % launch.id
    response = self.client.get(path)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    data = json.loads(response.content.decode('utf-8'))
    # A detail response is a single object, never a paginated envelope.
    self.assertNotIn('next', data)
    self.assertNotIn('result', data)
    self.assertNotIn('previous', data)
    self.assertNotIn('count', data)
    self.assertEqual(data['id'], str(launch.id))
    self.assertEqual(data['name'], launch.name)
    self.assertIn('slug', data)
    self.assertEqual(data['status']['id'], launch.status.id)
    # The legacy timestamp/ISO fields are gone in favor of ISO datetimes.
    self.assertNotIn('netstamp', data)
    self.assertNotIn('wsstamp', data)
    self.assertNotIn('westamp', data)
    self.assertIn('net', data)
    self.assertIn('window_end', data)
    self.assertIn('window_start', data)
    self.assertNotIn('isonet', data)
    self.assertNotIn('isostart', data)
    self.assertNotIn('isoend', data)
    self.assertIn('launch_service_provider', data)
    self.assertEqual(data['rocket']['configuration']['id'], launch.rocket.configuration.id)
    self.assertEqual(data['rocket']['configuration']['manufacturer']['name'], launch.rocket.configuration.manufacturer.name)
    # ``firststage`` looks like a related manager (``.count()`` is used on
    # it); a manager is not indexable, so materialize the queryset once.
    first_stages = launch.rocket.firststage.all()
    self.assertEqual(len(data['rocket']['launcher_stage']), first_stages.count())
    for index, stage_data in enumerate(data['rocket']['launcher_stage']):
        stage = first_stages[index]
        self.assertEqual(stage_data['type'], stage.type.name)
        self.assertEqual(stage_data['reused'], stage.reused)
        self.assertEqual(stage_data['launcher_flight_number'], stage.launcher_flight_number)
        self.assertEqual(stage_data['launcher']['id'], stage.launcher.id)
        self.assertEqual(stage_data['launcher']['serial_number'], stage.launcher.serial_number)
        self.assertEqual(stage_data['landing']['attempt'], stage.landing.attempt)
        self.assertEqual(stage_data['landing']['success'], stage.landing.success)
        self.assertEqual(stage_data['landing']['description'], stage.landing.description)
        self.assertEqual(stage_data['landing']['location']['name'], stage.landing.location.name)
        self.assertEqual(stage_data['landing']['type']['name'], stage.landing.type.name)
    self.assertEqual(data['mission']['id'], launch.mission.id)
    self.assertEqual(data['pad']['id'], launch.pad.id)
    self.assertEqual(data['pad']['location']['id'], launch.pad.location.id)
|
|
# Python
import collections
import json
import re

# Python LDAP
import ldap

# Django
from django.utils.translation import ugettext_lazy as _

# Django Auth LDAP
import django_auth_ldap.config
from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion

# Tower
from awx.conf import fields
from awx.conf.fields import *  # noqa
from awx.conf.license import feature_enabled
from awx.main.validators import validate_certificate
from awx.sso.validators import *  # noqa
def get_subclasses(cls):
    """Yield every direct and indirect subclass of ``cls``, descendants first."""
    for child in cls.__subclasses__():
        # Recurse before yielding so deeper subclasses come out first.
        for descendant in get_subclasses(child):
            yield descendant
        yield child
class AuthenticationBackendsField(fields.StringListField):
    """String-list setting field whose default is the list of authentication
    backends that are actually usable: a backend is included only when all
    of its required settings are non-empty and its license feature (if any)
    is enabled."""

    # Mapping of settings that must be set in order to enable each
    # authentication backend.
    REQUIRED_BACKEND_SETTINGS = collections.OrderedDict([
        ('awx.sso.backends.LDAPBackend', [
            'AUTH_LDAP_SERVER_URI',
        ]),
        ('awx.sso.backends.RADIUSBackend', [
            'RADIUS_SERVER',
        ]),
        ('social.backends.google.GoogleOAuth2', [
            'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
            'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
        ]),
        ('social.backends.github.GithubOAuth2', [
            'SOCIAL_AUTH_GITHUB_KEY',
            'SOCIAL_AUTH_GITHUB_SECRET',
        ]),
        ('social.backends.github.GithubOrganizationOAuth2', [
            'SOCIAL_AUTH_GITHUB_ORG_KEY',
            'SOCIAL_AUTH_GITHUB_ORG_SECRET',
            'SOCIAL_AUTH_GITHUB_ORG_NAME',
        ]),
        ('social.backends.github.GithubTeamOAuth2', [
            'SOCIAL_AUTH_GITHUB_TEAM_KEY',
            'SOCIAL_AUTH_GITHUB_TEAM_SECRET',
            'SOCIAL_AUTH_GITHUB_TEAM_ID',
        ]),
        ('social.backends.azuread.AzureADOAuth2', [
            'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY',
            'SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET',
        ]),
        ('awx.sso.backends.SAMLAuth', [
            'SOCIAL_AUTH_SAML_SP_ENTITY_ID',
            'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT',
            'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY',
            'SOCIAL_AUTH_SAML_ORG_INFO',
            'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT',
            'SOCIAL_AUTH_SAML_SUPPORT_CONTACT',
            'SOCIAL_AUTH_SAML_ENABLED_IDPS',
        ]),
        ('django.contrib.auth.backends.ModelBackend', []),
    ])

    # License feature that must be enabled before the backend is offered.
    REQUIRED_BACKEND_FEATURE = {
        'awx.sso.backends.LDAPBackend': 'ldap',
        'awx.sso.backends.RADIUSBackend': 'enterprise_auth',
        'awx.sso.backends.SAMLAuth': 'enterprise_auth',
    }

    @classmethod
    def get_all_required_settings(cls):
        """Return the set of every setting any backend may depend on."""
        all_required_settings = set(['LICENSE'])
        for required_settings in cls.REQUIRED_BACKEND_SETTINGS.values():
            all_required_settings.update(required_settings)
        return all_required_settings

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('default', self._default_from_required_settings)
        super(AuthenticationBackendsField, self).__init__(*args, **kwargs)

    def _default_from_required_settings(self):
        from django.conf import settings
        try:
            backends = settings._awx_conf_settings._get_default('AUTHENTICATION_BACKENDS')
        except AttributeError:
            backends = self.REQUIRED_BACKEND_SETTINGS.keys()
        # Work on a concrete list. The previous implementation rebound
        # ``backends`` to a lazy ``filter(lambda x: x != backend, ...)``;
        # under Python 3 that is a one-shot iterator (consumed by the next
        # ``in`` test) whose lambda late-binds the loop variable, so the
        # filtering was wrong. A list comprehension avoids both problems.
        backends = list(backends)
        # Filter which authentication backends are enabled based on their
        # required settings being defined and non-empty. Also filter available
        # backends based on license features.
        for backend, required_settings in self.REQUIRED_BACKEND_SETTINGS.items():
            if backend not in backends:
                continue
            required_feature = self.REQUIRED_BACKEND_FEATURE.get(backend, '')
            if not required_feature or feature_enabled(required_feature):
                if all([getattr(settings, rs, None) for rs in required_settings]):
                    continue
            backends = [b for b in backends if b != backend]
        return backends
class LDAPServerURIField(fields.URLField):
    """URL field accepting one or more comma/space separated LDAP URIs."""

    def __init__(self, **kwargs):
        # Restrict accepted schemes to LDAP unless the caller overrides them.
        kwargs.setdefault('schemes', ('ldap', 'ldaps'))
        super(LDAPServerURIField, self).__init__(**kwargs)

    def run_validators(self, value):
        # The setting may contain several URIs; validate each one separately.
        for candidate in re.split(r'[, ]', value or ''):
            if not candidate:
                continue
            super(LDAPServerURIField, self).run_validators(candidate)
        return value
class LDAPConnectionOptionsField(fields.DictField):
    """Dict field translating LDAP connection options between their symbolic
    names (external representation) and python-ldap integer constants
    (internal representation)."""

    default_error_messages = {
        'invalid_options': _('Invalid connection option(s): {invalid_options}.'),
    }

    def to_representation(self, value):
        # Present integer option constants under their symbolic names.
        option_names = ldap.OPT_NAMES_DICT
        represented = {}
        for option, option_value in (value or {}).items():
            if option in option_names:
                represented[option_names[option]] = option_value
        return represented

    def to_internal_value(self, data):
        data = super(LDAPConnectionOptionsField, self).to_internal_value(data)
        # Reverse map: symbolic option name -> integer constant.
        name_to_constant = dict((name, opt) for opt, name in ldap.OPT_NAMES_DICT.items())
        unknown_options = set(data.keys()) - set(name_to_constant.keys())
        if unknown_options:
            options_display = json.dumps(list(unknown_options)).lstrip('[').rstrip(']')
            self.fail('invalid_options', invalid_options=options_display)
        # Convert named options to their integer constants.
        converted = {}
        for option_name, option_value in data.items():
            converted[name_to_constant[option_name]] = option_value
        return converted
class LDAPDNField(fields.CharField):
    """Char field validated as an LDAP distinguished name."""

    def __init__(self, **kwargs):
        super(LDAPDNField, self).__init__(**kwargs)
        self.validators.append(validate_ldap_dn)

    def run_validation(self, data=empty):
        value = super(LDAPDNField, self).run_validation(data)
        # django-auth-ldap expects DN fields (like AUTH_LDAP_REQUIRE_GROUP)
        # to be either a valid string or ``None`` (not an empty string).
        if value == '':
            return None
        return value
class LDAPDNWithUserField(fields.CharField):
    """Char field validated as an LDAP DN that may contain a %(user)s token."""

    def __init__(self, **kwargs):
        super(LDAPDNWithUserField, self).__init__(**kwargs)
        self.validators.append(validate_ldap_dn_with_user)

    def run_validation(self, data=empty):
        value = super(LDAPDNWithUserField, self).run_validation(data)
        # django-auth-ldap expects DN fields (like AUTH_LDAP_USER_DN_TEMPLATE)
        # to be either a valid string or ``None`` (not an empty string).
        if value == '':
            return None
        return value
class LDAPFilterField(fields.CharField):
    """Char field validated as an LDAP search filter expression."""
    def __init__(self, **kwargs):
        super(LDAPFilterField, self).__init__(**kwargs)
        self.validators.append(validate_ldap_filter)
class LDAPFilterWithUserField(fields.CharField):
    """LDAP filter field whose value may contain a %(user)s placeholder."""
    def __init__(self, **kwargs):
        super(LDAPFilterWithUserField, self).__init__(**kwargs)
        self.validators.append(validate_ldap_filter_with_user)
class LDAPScopeField(fields.ChoiceField):
    """Choice field translating scope names to python-ldap scope constants."""

    def __init__(self, choices=None, **kwargs):
        if not choices:
            choices = [
                ('SCOPE_BASE', _('Base')),
                ('SCOPE_ONELEVEL', _('One Level')),
                ('SCOPE_SUBTREE', _('Subtree')),
            ]
        super(LDAPScopeField, self).__init__(choices, **kwargs)

    def to_representation(self, value):
        # Map the integer scope constant back to its symbolic name.
        for name in self.choices.keys():
            if getattr(ldap, name) == value:
                return name
        return super(LDAPScopeField, self).to_representation(value)

    def to_internal_value(self, data):
        # Store the actual python-ldap integer constant.
        name = super(LDAPScopeField, self).to_internal_value(data)
        return getattr(ldap, name)
class LDAPSearchField(fields.ListField):
    """List field representing an LDAPSearch as [base_dn, scope, filter]."""

    default_error_messages = {
        'invalid_length': _('Expected a list of three items but got {length} instead.'),
        'type_error': _('Expected an instance of LDAPSearch but got {input_type} instead.'),
    }
    # Subclasses may substitute a filter field that allows %(user)s.
    ldap_filter_field_class = LDAPFilterField

    def to_representation(self, value):
        if not value:
            return []
        if not isinstance(value, LDAPSearch):
            self.fail('type_error', input_type=type(value))
        base_dn = LDAPDNField().to_representation(value.base_dn)
        scope = LDAPScopeField().to_representation(value.scope)
        filterstr = self.ldap_filter_field_class().to_representation(value.filterstr)
        return [base_dn, scope, filterstr]

    def to_internal_value(self, data):
        data = super(LDAPSearchField, self).to_internal_value(data)
        if len(data) == 0:
            return None
        if len(data) != 3:
            self.fail('invalid_length', length=len(data))
        base_dn, scope, filterstr = data
        return LDAPSearch(
            LDAPDNField().run_validation(base_dn),
            LDAPScopeField().run_validation(scope),
            self.ldap_filter_field_class().run_validation(filterstr),
        )
class LDAPSearchWithUserField(LDAPSearchField):
    """LDAPSearchField variant whose filter may reference %(user)s."""
    ldap_filter_field_class = LDAPFilterWithUserField
class LDAPSearchUnionField(fields.ListField):
    """List field accepting either one LDAP search (a [base_dn, scope, filter]
    triple) or a union of such searches (a list of triples)."""

    default_error_messages = {
        'type_error': _('Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} instead.'),
    }
    ldap_search_field_class = LDAPSearchWithUserField

    def to_representation(self, value):
        if not value:
            return []
        elif isinstance(value, LDAPSearchUnion):
            return [self.ldap_search_field_class().to_representation(s) for s in value.searches]
        elif isinstance(value, LDAPSearch):
            return self.ldap_search_field_class().to_representation(value)
        else:
            self.fail('type_error', input_type=type(value))

    def to_internal_value(self, data):
        data = super(LDAPSearchUnionField, self).to_internal_value(data)
        if len(data) == 0:
            return None
        if len(data) == 3 and isinstance(data[0], basestring):
            # A bare three-item list whose first element is a string is a
            # single search, not a union.
            return self.ldap_search_field_class().run_validation(data)
        else:
            search_args = []
            for i, element in enumerate(data):
                if not isinstance(element, list):
                    # Fixed typo in user-facing message: "ultilize" -> "utilize".
                    raise ValidationError('In order to utilize LDAP Union, input element No. %d'
                                          ' should be a search query array.' % (i + 1))
                try:
                    search_args.append(self.ldap_search_field_class().run_validation(element))
                except Exception as e:
                    # Prefix nested validation errors with the index of the
                    # offending union element, then re-raise unchanged.
                    if hasattr(e, 'detail') and isinstance(e.detail, list):
                        e.detail.insert(0, "Error parsing LDAP Union element No. %d:" % (i + 1))
                    raise
            return LDAPSearchUnion(*search_args)
class LDAPUserAttrMapField(fields.DictField):
    """Dict field mapping user model attributes to LDAP attribute names."""

    default_error_messages = {
        'invalid_attrs': _('Invalid user attribute(s): {invalid_attrs}.'),
    }
    # Only these user model attributes may be populated from LDAP.
    valid_user_attrs = {'first_name', 'last_name', 'email'}
    child = fields.CharField()

    def to_internal_value(self, data):
        data = super(LDAPUserAttrMapField, self).to_internal_value(data)
        unknown = set(data.keys()) - self.valid_user_attrs
        if unknown:
            attrs_display = json.dumps(list(unknown)).lstrip('[').rstrip(']')
            self.fail('invalid_attrs', invalid_attrs=attrs_display)
        return data
class LDAPGroupTypeField(fields.ChoiceField):
    """Choice field translating class names to django-auth-ldap group type
    instances."""

    default_error_messages = {
        'type_error': _('Expected an instance of LDAPGroupType but got {input_type} instead.'),
    }

    def __init__(self, choices=None, **kwargs):
        # Offer every known LDAPGroupType subclass as a choice by default.
        if not choices:
            group_types = get_subclasses(django_auth_ldap.config.LDAPGroupType)
            choices = [(cls.__name__, cls.__name__) for cls in group_types]
        super(LDAPGroupTypeField, self).__init__(choices, **kwargs)

    def to_representation(self, value):
        if not value:
            return ''
        if not isinstance(value, django_auth_ldap.config.LDAPGroupType):
            self.fail('type_error', input_type=type(value))
        return type(value).__name__

    def to_internal_value(self, data):
        data = super(LDAPGroupTypeField, self).to_internal_value(data)
        if not data:
            return None
        cls = getattr(django_auth_ldap.config, data)
        # MemberDNGroupType subclasses require the group member attribute.
        if data.endswith('MemberDNGroupType'):
            return cls(member_attr='member')
        return cls()
class LDAPUserFlagsField(fields.DictField):
    """Dict field mapping user flags (is_superuser, is_system_auditor) to
    the LDAP group DNs that grant them."""

    default_error_messages = {
        'invalid_flag': _('Invalid user flag: "{invalid_flag}".'),
    }
    valid_user_flags = {'is_superuser', 'is_system_auditor'}
    child = LDAPDNField()

    def to_internal_value(self, data):
        data = super(LDAPUserFlagsField, self).to_internal_value(data)
        invalid_flags = (set(data.keys()) - self.valid_user_flags)
        if invalid_flags:
            # Sort so the flag reported in the error is deterministic instead
            # of depending on set iteration order.
            self.fail('invalid_flag', invalid_flag=sorted(invalid_flags)[0])
        return data
class LDAPDNMapField(fields.ListField):
    """Field accepting None/True/False, a DN string, or a list of DN strings."""

    default_error_messages = {
        'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'),
    }
    child = LDAPDNField()

    def to_representation(self, value):
        # Order matters: list check first, then boolean/null coercions, then
        # a single DN string; anything else is a type error.
        if isinstance(value, (list, tuple)):
            return super(LDAPDNMapField, self).to_representation(value)
        if value in fields.NullBooleanField.TRUE_VALUES:
            return True
        if value in fields.NullBooleanField.FALSE_VALUES:
            return False
        if value in fields.NullBooleanField.NULL_VALUES:
            return None
        if isinstance(value, basestring):
            return self.child.to_representation(value)
        self.fail('type_error', input_type=type(value))

    def to_internal_value(self, data):
        if isinstance(data, (list, tuple)):
            return super(LDAPDNMapField, self).to_internal_value(data)
        if data in fields.NullBooleanField.TRUE_VALUES:
            return True
        if data in fields.NullBooleanField.FALSE_VALUES:
            return False
        if data in fields.NullBooleanField.NULL_VALUES:
            return None
        if isinstance(data, basestring):
            return self.child.run_validation(data)
        self.fail('type_error', input_type=type(data))
class BaseDictWithChildField(fields.DictField):
    """Dict field whose known keys are each validated by a dedicated child
    field declared in ``child_fields``."""

    default_error_messages = {
        'missing_keys': _('Missing key(s): {missing_keys}.'),
        'invalid_keys': _('Invalid key(s): {invalid_keys}.'),
    }
    # Map of key name -> field instance used to (de)serialize that key.
    child_fields = {
        # 'key': fields.ChildField(),
    }
    # When True, keys without a declared child field pass through untouched.
    allow_unknown_keys = False

    def __init__(self, *args, **kwargs):
        # allow_blank lets an empty dict skip the required-key check.
        self.allow_blank = kwargs.pop('allow_blank', False)
        super(BaseDictWithChildField, self).__init__(*args, **kwargs)

    def to_representation(self, value):
        value = super(BaseDictWithChildField, self).to_representation(value)
        for key, item in value.items():
            field = self.child_fields.get(key)
            if field:
                value[key] = field.to_representation(item)
            elif self.allow_unknown_keys:
                value[key] = item
        return value

    def to_internal_value(self, data):
        data = super(BaseDictWithChildField, self).to_internal_value(data)
        # All required child fields must be present, unless blank is allowed
        # and the dict is empty.
        missing_keys = set(
            key for key, field in self.child_fields.items()
            if field.required and key not in data
        )
        if missing_keys and (data or not self.allow_blank):
            keys_display = json.dumps(list(missing_keys)).lstrip('[').rstrip(']')
            self.fail('missing_keys', missing_keys=keys_display)
        if not self.allow_unknown_keys:
            invalid_keys = set(data.keys()) - set(self.child_fields.keys())
            if invalid_keys:
                keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']')
                self.fail('invalid_keys', invalid_keys=keys_display)
        for key, item in data.items():
            field = self.child_fields.get(key)
            if field:
                data[key] = field.run_validation(item)
            elif self.allow_unknown_keys:
                data[key] = item
        return data
class LDAPSingleOrganizationMapField(BaseDictWithChildField):
    """Validate one LDAP organization mapping entry (admins/users DN maps
    plus remove flags); unknown keys are rejected by the base class."""
    default_error_messages = {
        'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'),
    }
    child_fields = {
        'admins': LDAPDNMapField(allow_null=True, required=False),
        'users': LDAPDNMapField(allow_null=True, required=False),
        'remove_admins': fields.BooleanField(required=False),
        'remove_users': fields.BooleanField(required=False),
    }
class LDAPOrganizationMapField(fields.DictField):
    """Dict mapping organization names to single-organization map entries."""
    child = LDAPSingleOrganizationMapField()
class LDAPSingleTeamMapField(BaseDictWithChildField):
    """Validate one LDAP team mapping entry (organization, users, remove)."""
    default_error_messages = {
        # Bug fix: this message previously used the {invalid_keys} placeholder,
        # but BaseDictWithChildField fails with missing_keys=..., so formatting
        # the message raised KeyError instead of reporting the missing key.
        # (SocialSingleTeamMapField already used the correct placeholder.)
        'missing_keys': _('Missing required key for team map: {missing_keys}.'),
        'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'),
    }
    child_fields = {
        'organization': fields.CharField(),
        'users': LDAPDNMapField(allow_null=True, required=False),
        'remove': fields.BooleanField(required=False),
    }
class LDAPTeamMapField(fields.DictField):
    """Dict mapping team names to single-team map entries."""
    child = LDAPSingleTeamMapField()
class RADIUSSecretField(fields.CharField):
    """Char field whose value is coerced to UTF-8 bytes.

    NOTE(review): presumably the RADIUS client library requires a byte-string
    secret — confirm against the consumer of this setting.
    """

    @staticmethod
    def _coerce_bytes(value):
        # Encode unicode values; anything else is passed through unchanged.
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return value

    def run_validation(self, data=empty):
        return self._coerce_bytes(
            super(RADIUSSecretField, self).run_validation(data))

    def to_internal_value(self, value):
        return self._coerce_bytes(
            super(RADIUSSecretField, self).to_internal_value(value))
class SocialMapStringRegexField(fields.CharField):
    """Char field that round-trips compiled regexes as '/pattern/flags'
    strings (supported flags: i, m)."""

    def to_representation(self, value):
        if not isinstance(value, type(re.compile(''))):
            return super(SocialMapStringRegexField, self).to_representation(value)
        flag_chars = ''
        if value.flags & re.I:
            flag_chars += 'i'
        if value.flags & re.M:
            flag_chars += 'm'
        return '/{}/{}'.format(value.pattern, flag_chars)

    def to_internal_value(self, data):
        data = super(SocialMapStringRegexField, self).to_internal_value(data)
        match = re.match(r'^/(?P<pattern>.*)/(?P<flags>[im]+)?$', data)
        if not match:
            # Not in /pattern/flags form: keep as a plain string.
            return data
        flags = 0
        flag_chars = match.group('flags') or ''
        if 'i' in flag_chars:
            flags |= re.I
        if 'm' in flag_chars:
            flags |= re.M
        try:
            return re.compile(match.group('pattern'), flags)
        except re.error as e:
            raise ValidationError('{}: {}'.format(e, data))
class SocialMapField(fields.ListField):
    """Field accepting None/True/False, a string/regex, or a list thereof."""

    default_error_messages = {
        'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'),
    }
    child = SocialMapStringRegexField()

    def to_representation(self, value):
        # Order matters: list check first, then boolean/null coercions, then
        # a string or compiled regex; anything else is a type error.
        if isinstance(value, (list, tuple)):
            return super(SocialMapField, self).to_representation(value)
        if value in fields.NullBooleanField.TRUE_VALUES:
            return True
        if value in fields.NullBooleanField.FALSE_VALUES:
            return False
        if value in fields.NullBooleanField.NULL_VALUES:
            return None
        if isinstance(value, (basestring, type(re.compile('')))):
            return self.child.to_representation(value)
        self.fail('type_error', input_type=type(value))

    def to_internal_value(self, data):
        if isinstance(data, (list, tuple)):
            return super(SocialMapField, self).to_internal_value(data)
        if data in fields.NullBooleanField.TRUE_VALUES:
            return True
        if data in fields.NullBooleanField.FALSE_VALUES:
            return False
        if data in fields.NullBooleanField.NULL_VALUES:
            return None
        if isinstance(data, basestring):
            return self.child.run_validation(data)
        self.fail('type_error', input_type=type(data))
class SocialSingleOrganizationMapField(BaseDictWithChildField):
    """Validate one social-auth organization mapping entry."""
    default_error_messages = {
        'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'),
    }
    child_fields = {
        'admins': SocialMapField(allow_null=True, required=False),
        'users': SocialMapField(allow_null=True, required=False),
        'remove_admins': fields.BooleanField(required=False),
        'remove_users': fields.BooleanField(required=False),
    }
class SocialOrganizationMapField(fields.DictField):
    """Dict mapping organization names to social organization map entries."""
    child = SocialSingleOrganizationMapField()
class SocialSingleTeamMapField(BaseDictWithChildField):
    """Validate one social-auth team mapping entry (organization, users,
    remove)."""
    default_error_messages = {
        'missing_keys': _('Missing required key for team map: {missing_keys}.'),
        'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'),
    }
    child_fields = {
        'organization': fields.CharField(),
        'users': SocialMapField(allow_null=True, required=False),
        'remove': fields.BooleanField(required=False),
    }
class SocialTeamMapField(fields.DictField):
    """Dict mapping team names to social team map entries."""
    child = SocialSingleTeamMapField()
class SAMLOrgInfoValueField(BaseDictWithChildField):
    """Validate one SAML org info record (name, displayname, url).

    Extra keys are passed through unvalidated (allow_unknown_keys).
    """
    default_error_messages = {
        'missing_keys': _('Missing required key(s) for org info record: {missing_keys}.'),
    }
    child_fields = {
        'name': fields.CharField(),
        'displayname': fields.CharField(),
        'url': fields.URLField(),
    }
    allow_unknown_keys = True
class SAMLOrgInfoField(fields.DictField):
    """Dict mapping language codes to SAML org info records."""

    default_error_messages = {
        'invalid_lang_code': _('Invalid language code(s) for org info: {invalid_lang_codes}.'),
    }
    child = SAMLOrgInfoValueField()

    def to_internal_value(self, data):
        data = super(SAMLOrgInfoField, self).to_internal_value(data)
        # Keys must look like language codes, e.g. 'en' or 'en-us'
        # (case-insensitive).
        lang_code_re = re.compile(r'^[a-z]{2}(?:-[a-z]{2})??$', re.I)
        invalid_keys = set(
            key for key in data.keys() if not lang_code_re.match(key)
        )
        if invalid_keys:
            keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']')
            self.fail('invalid_lang_code', invalid_lang_codes=keys_display)
        return data
class SAMLContactField(BaseDictWithChildField):
    """Validate a SAML contact record (givenName, emailAddress).

    Extra keys are passed through unvalidated (allow_unknown_keys).
    """
    default_error_messages = {
        'missing_keys': _('Missing required key(s) for contact: {missing_keys}.'),
    }
    child_fields = {
        'givenName': fields.CharField(),
        'emailAddress': fields.EmailField(),
    }
    allow_unknown_keys = True
allow_unknown_keys = True
class SAMLIdPField(BaseDictWithChildField):
    """Validate a SAML identity provider record.

    Requires entity_id, url and a valid x509 certificate; the attr_*
    attribute-name overrides are optional. Extra keys are passed through
    unvalidated (allow_unknown_keys).
    """
    default_error_messages = {
        'missing_keys': _('Missing required key(s) for IdP: {missing_keys}.'),
    }
    child_fields = {
        'entity_id': fields.CharField(),
        'url': fields.URLField(),
        'x509cert': fields.CharField(validators=[validate_certificate]),
        'attr_user_permanent_id': fields.CharField(required=False),
        'attr_first_name': fields.CharField(required=False),
        'attr_last_name': fields.CharField(required=False),
        'attr_username': fields.CharField(required=False),
        'attr_email': fields.CharField(required=False),
    }
    allow_unknown_keys = True
class SAMLEnabledIdPsField(fields.DictField):
    """Dict mapping IdP names to SAMLIdPField records."""
    child = SAMLIdPField()
|
|
""" Unit tests for DynamoDB Config Store """
import time
import unittest
from random import random
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.exceptions import ItemNotFound, ValidationException
from boto.dynamodb2.table import Table
from dynamodb_config_store import DynamoDBConfigStore
from dynamodb_config_store.exceptions import MisconfiguredSchemaException
# Connection to a local DynamoDB endpoint (e.g. DynamoDB Local on :8000);
# the dummy credentials are accepted because this is not real AWS.
connection = DynamoDBConnection(
    aws_access_key_id='foo',
    aws_secret_access_key='bar',
    host='localhost',
    port=8000,
    is_secure=False)
class TestCustomThroughput(unittest.TestCase):
    """Ensure custom read/write capacity units are applied to new tables."""

    def setUp(self):
        """Instantiate a store with explicit throughput settings."""
        self.table_name = 'conf'
        self.store_name = 'test'
        self.read_units = 10
        self.write_units = 8
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name,
            read_units=self.read_units,
            write_units=self.write_units)
        # Table handle used to inspect what was actually created.
        self.table = Table(self.table_name, connection=connection)

    def test_custom_throughput(self):
        """ Test that we can set custom thoughput for new tables """
        described = self.table.describe()[u'Table'][u'ProvisionedThroughput']
        self.assertEqual(described[u'ReadCapacityUnits'], self.read_units)
        self.assertEqual(described[u'WriteCapacityUnits'], self.write_units)

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestCustomStoreAndOptionKeys(unittest.TestCase):
    """Ensure custom hash/range key names are honored by the store."""

    def setUp(self):
        """Instantiate a store using '_s'/'_o' as store and option keys."""
        self.table_name = 'conf'
        self.store_name = 'test'
        self.store_key = '_s'
        self.option_key = '_o'
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name,
            store_key=self.store_key,
            option_key=self.option_key)
        # Table handle used for direct verification against DynamoDB.
        self.table = Table(self.table_name, connection=connection)

    def test_custom_store_and_option_keys(self):
        """ Test that we can set custom store and option keys """
        option_data = {
            'host': '127.0.0.1',
            'port': 27017
        }
        self.store.set('db', option_data)
        # Read the raw item back using the custom key names.
        item = self.table.get_item(**{'_s': self.store_name, '_o': 'db'})
        self.assertEqual(item['_s'], self.store_name)
        self.assertEqual(item['_o'], 'db')
        self.assertEqual(item['host'], '127.0.0.1')
        self.assertEqual(item['port'], 27017)

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestDefaultThroughput(unittest.TestCase):
    """Ensure new tables default to 1 read / 1 write capacity unit."""
    def setUp(self):
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        # Instantiate the store without explicit throughput arguments
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name)
        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)
    # NOTE(review): method name says "custom" but this checks the *default*
    # throughput; kept for stability of test discovery/reporting.
    def test_custom_throughput(self):
        """ Test that new tables get the default throughput of 1/1 units """
        throughput = self.table.describe()[u'Table'][u'ProvisionedThroughput']
        self.assertEqual(throughput[u'ReadCapacityUnits'], 1)
        self.assertEqual(throughput[u'WriteCapacityUnits'], 1)
    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestGetOption(unittest.TestCase):
    """Exercise retrieval of single options from the store."""

    def setUp(self):
        """Instantiate a store backed by the 'conf' table."""
        self.table_name = 'conf'
        self.store_name = 'test'
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name)
        # Table handle used for cleanup.
        self.table = Table(self.table_name, connection=connection)

    def test_get(self):
        """ Test that we can retrieve an object from the store """
        option_data = {
            'endpoint': 'http://test.com',
            'port': 80,
            'username': 'test',
            'password': 'something'
        }
        self.store.set('api', option_data)
        option = self.store.config.get('api')
        # Internal bookkeeping keys must not leak into the result.
        self.assertNotIn('_store', option)
        self.assertNotIn('_option', option)
        for key in ('endpoint', 'port', 'username', 'password'):
            self.assertEqual(option[key], option_data[key])

    def test_get_item_not_found(self):
        """ Test that we can't retrieve non-existing items """
        with self.assertRaises(ItemNotFound):
            self.store.config.get('doesnotexist')

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestGetOptionAndKeysSubset(unittest.TestCase):
    """Ensure get() can restrict the returned option to a subset of keys."""
    def setUp(self):
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        # Instantiate the store
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name)
        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)
    def test_get(self):
        """ Test that we can retrieve a subset of an object's keys """
        obj = {
            'endpoint': 'http://test.com',
            'port': 80,
            'username': 'test',
            'password': 'something'
        }
        # Insert the object
        self.store.set('api', obj)
        # Retrieve only the requested keys
        option = self.store.config.get('api', keys=['endpoint', 'port'])
        # Neither internal keys nor unrequested keys should be present
        self.assertNotIn('_store', option)
        self.assertNotIn('_option', option)
        self.assertNotIn('username', option)
        self.assertNotIn('password', option)
        self.assertEqual(option['endpoint'], obj['endpoint'])
        self.assertEqual(option['port'], obj['port'])
    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestGetFullStore(unittest.TestCase):
    """Exercise retrieval of every option in a store at once."""

    def setUp(self):
        """Instantiate a store backed by the 'conf' table."""
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        # Instantiate the store
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name)
        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)

    def test_get_of_full_store(self):
        """ Test that we can retrieve all objects in the store """
        objApi = {
            'endpoint': 'http://test.com',
            'port': 80,
            'username': 'test',
            'password': 'something'
        }
        objUser = {
            'username': 'luke',
            'password': 'skywalker'
        }
        # Insert both objects
        self.store.set('api', objApi)
        self.store.set('user', objUser)
        # Retrieve all objects
        options = self.store.config.get()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(options), 2)
        optApi = options['api']
        optUser = options['user']
        # Internal bookkeeping keys must not leak into the results.
        self.assertNotIn('_store', optApi)
        self.assertNotIn('_option', optApi)
        self.assertEqual(optApi['endpoint'], objApi['endpoint'])
        self.assertEqual(optApi['port'], objApi['port'])
        self.assertEqual(optApi['username'], objApi['username'])
        self.assertEqual(optApi['password'], objApi['password'])
        self.assertNotIn('_store', optUser)
        self.assertNotIn('_option', optUser)
        self.assertEqual(optUser['username'], objUser['username'])
        self.assertEqual(optUser['password'], objUser['password'])

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestMisconfiguredSchemaException(unittest.TestCase):
    """Ensure schema mismatches against an existing table are detected."""

    def setUp(self):
        """Create the table with the default '_store'/'_option' schema."""
        self.table_name = 'conf'
        self.store_name = 'test'
        DynamoDBConfigStore(connection, self.table_name, self.store_name)
        # Table handle used for cleanup.
        self.table = Table(self.table_name, connection=connection)

    def test_misconfigured_schema_store_key(self):
        """ Test that an exception is raised if the store key is not an hash """
        with self.assertRaises(MisconfiguredSchemaException):
            DynamoDBConfigStore(
                connection,
                self.table_name,
                self.store_name,
                store_key='test')

    def test_misconfigured_schema_option_key(self):
        """ Test that an exception is raised if the option key isn't a range """
        with self.assertRaises(MisconfiguredSchemaException):
            DynamoDBConfigStore(
                connection,
                self.table_name,
                self.store_name,
                option_key='test')

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestSet(unittest.TestCase):
    """Exercise inserting, updating and replacing options in the store."""
    def setUp(self):
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        # Instantiate the store
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name)
        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)
    def test_set(self):
        """ Test that we can insert an object """
        obj = {
            'host': '127.0.0.1',
            'port': 27017
        }
        # Insert the object
        self.store.set('db', obj)
        # Fetch the object directly from DynamoDB to verify what was written
        kwargs = {
            '_store': self.store_name,
            '_option': 'db'
        }
        item = self.table.get_item(**kwargs)
        self.assertEqual(item['_store'], self.store_name)
        self.assertEqual(item['_option'], 'db')
        self.assertEqual(item['host'], '127.0.0.1')
        self.assertEqual(item['port'], 27017)
    def test_update(self):
        """ Test that we can change values in an option """
        obj = {
            'username': 'luke',
            'password': 'skywalker'
        }
        # Insert the object
        self.store.set('user', obj)
        # Get the option
        option = self.store.config.get('user')
        self.assertEqual(option['username'], obj['username'])
        self.assertEqual(option['password'], obj['password'])
        # Updated version of the object (same keys, new value)
        updatedObj = {
            'username': 'anakin',
            'password': 'skywalker'
        }
        # Setting the same option again overwrites it
        self.store.set('user', updatedObj)
        # Get the option
        option = self.store.config.get('user')
        self.assertEqual(option['username'], updatedObj['username'])
        self.assertEqual(option['password'], updatedObj['password'])
    def test_update_with_new_keys(self):
        """ Test that we can completely change the keys """
        obj = {
            'username': 'luke',
            'password': 'skywalker'
        }
        # Insert the object
        self.store.set('credentials', obj)
        # Get the option
        option = self.store.config.get('credentials')
        self.assertEqual(option['username'], obj['username'])
        self.assertEqual(option['password'], obj['password'])
        # Updated version of the object with a disjoint key set
        updatedObj = {
            'access_key': 'anakin',
            'secret_key': 'skywalker'
        }
        # Re-setting replaces the option entirely; old keys must disappear
        self.store.set('credentials', updatedObj)
        # Get the option
        option = self.store.config.get('credentials')
        self.assertEqual(option['access_key'], updatedObj['access_key'])
        self.assertEqual(option['secret_key'], updatedObj['secret_key'])
        self.assertNotIn('username', option)
        self.assertNotIn('password', option)
    # NOTE(review): method name has a typo ("instert"); kept for stability
    # of test discovery/reporting.
    def test_instert_too_large_object(self):
        """ Test of inserting an object larger than 64 kb """
        with self.assertRaises(ValidationException):
            # Build an item that exceeds the DynamoDB item size limit
            self.store.set(
                'large',
                {x: int(random()*100000000000000) for x in xrange(1, 9999)})
    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestTimeBasedConfigStore(unittest.TestCase):
    """Exercise the TimeBasedConfigStore, whose config attributes refresh
    on a fixed interval rather than on every read."""
    def setUp(self):
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        # Instantiate the store with a 5 second refresh interval
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name,
            config_store='TimeBasedConfigStore',
            config_store_kwargs={'update_interval': 5})
        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)
    def test_time_based_config_store(self):
        """ Test inserting and updating in time based config stores """
        obj = {
            'host': '127.0.0.1',
            'port': 27017
        }
        # Insert the object
        self.store.set('db', obj)
        with self.assertRaises(AttributeError):
            # We do not expect the attribute to exist until the
            # config has been reloaded
            self.store.config.db
        # Force config reload
        self.store.reload()
        self.assertEqual(self.store.config.db['host'], obj['host'])
        self.assertEqual(self.store.config.db['port'], obj['port'])
        # Update the object in DynamoDB; the cached config should still
        # serve the old values until the interval elapses
        updatedObj = {
            'host': '127.0.0.1',
            'port': 8000
        }
        self.store.set('db', updatedObj)
        self.assertEqual(self.store.config.db['host'], obj['host'])
        self.assertEqual(self.store.config.db['port'], obj['port'])
        # After the update interval the new values must be visible
        time.sleep(5)
        self.assertEqual(self.store.config.db['host'], updatedObj['host'])
        self.assertEqual(self.store.config.db['port'], updatedObj['port'])
    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
class TestNotImplementedConfigStore(unittest.TestCase):
    """Ensure unknown config_store class names raise NotImplementedError."""

    def test_not_implemented_config_store(self):
        """ Test that an unknown config store class is rejected """
        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        with self.assertRaises(NotImplementedError):
            # Instantiate the store with a non-existing config store class
            self.store = DynamoDBConfigStore(
                connection,
                self.table_name,
                self.store_name,
                config_store='NotExistingConfigStore')
        # Table handle for cleanup; the table may have been created before
        # the config store class lookup failed.
        self.table = Table(self.table_name, connection=connection)

    def tearDown(self):
        """ Tear down the test case """
        # Robustness fix: self.table is only assigned if the test body ran
        # far enough; guard so a failing test isn't masked by an
        # AttributeError raised from tearDown.
        table = getattr(self, 'table', None)
        if table is not None:
            table.delete()
def suite():
    """ Defines the test suite """
    # Order is preserved from the original hand-written suite.
    test_cases = [
        TestMisconfiguredSchemaException,
        TestDefaultThroughput,
        TestCustomThroughput,
        TestSet,
        TestGetOption,
        TestGetOptionAndKeysSubset,
        TestGetFullStore,
        TestCustomStoreAndOptionKeys,
        TestTimeBasedConfigStore,
        TestNotImplementedConfigStore,
    ]
    suite_builder = unittest.TestSuite()
    for case in test_cases:
        suite_builder.addTest(unittest.makeSuite(case))
    return suite_builder
if __name__ == '__main__':
    # Run the full suite with verbose output when executed directly.
    test_suite = suite()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import copy
import multiprocessing
import pytest
import socket
import os
import shutil
import json
from calvin.requests.request_handler import RequestHandler, RT
from calvin.utilities.nodecontrol import dispatch_node, dispatch_storage_node
from calvin.utilities.attribute_resolver import format_index_string
from calvin.utilities import certificate
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
from calvin.utilities import calvinuuid
_log = calvinlogger.get_logger(__name__)
_conf = calvinconfig.get()
request_handler = RequestHandler()
# Allow the test host address to be overridden via the environment; fall
# back to this machine's resolved address.
try:
    ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
except KeyError:
    # Narrowed from a bare ``except:`` which would also have hidden
    # unrelated failures (e.g. errors raised inside socket lookups are now
    # handled only in the fallback path below, where they belong).
    ip_addr = socket.gethostbyname(socket.gethostname())
# Runtime handles and node ids populated by the TestSecureDht fixture.
rt1 = None
rt2 = None
rt3 = None
rt1_id = None
rt2_id = None
rt3_id = None
test_script_dir = None
def absolute_filename(filename):
    """Return *filename* resolved relative to this test module's directory.

    Uses the module-level ``os`` import instead of re-importing ``os.path``
    locally on every call (the redundant local import was removed).
    """
    return os.path.join(os.path.dirname(__file__), filename)
@pytest.mark.slow
class TestSecureDht(unittest.TestCase):
    @pytest.fixture(autouse=True, scope="class")
    def setup(self, request):
        """Class-scoped fixture: build a secure-DHT test certificate domain
        and start three csruntime instances on ports 5000-5002 (control
        APIs on 5003-5005), all sharing the same generated config file."""
        from calvin.Tools.csruntime import csruntime
        from conftest import _config_pytest
        homefolder = os.getenv("HOME")
        domain = "rttest"
        configdir = os.path.join(homefolder, ".calvin",
                                 "security", domain)
        # Remove any stale domain configuration left by a previous run; a
        # missing directory is fine.
        try:
            shutil.rmtree(configdir)
        except:
            pass
        print "Trying to create a new test domain configuration."
        testconfig = certificate.Config(domain=domain)
        print "Reading configuration successfull."
        print "Creating new domain."
        certificate.new_domain(testconfig)
        print "Created new domain."
        # Now handled within runtime
        #for i in range(3):
        #    name = "++++node{}".format(i)
        #    nodeid = calvinuuid.uuid("NODE")
        #    certreq = certificate.new_runtime(testconfig, name, nodeid=nodeid)
        #    certificate.sign_req(testconfig, os.path.basename(certreq), name)
        global rt1
        global rt2
        global rt3
        global test_script_dir
        # Runtime configuration: secure DHT storage backed by the new
        # domain's certificates, written to a shared temp config file.
        rt_conf = copy.deepcopy(_conf)
        rt_conf.set('global', 'storage_type', 'securedht')
        rt_conf.add_section('security')
        rt_conf.set('security', "certificate_conf", None)
        rt_conf.set('security', "certificate_domain", domain)
        rt_conf.save("/tmp/calvin500x.conf")
        # Derive per-runtime log/out file names from the pytest options;
        # fall back to no logging when the option is unset.
        try:
            logfile = _config_pytest.getoption("logfile")+"5000"
            outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
            if outfile == logfile:
                outfile = None
        except:
            logfile = None
            outfile = None
        # Runtime 1 (node0) on port 5000, control API on 5003.
        csruntime(ip_addr, port=5000, controlport=5003, attr={'indexed_public':
                  {'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
                   'node_name': {'name': 'node0'},
                   'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
                  loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
                  configfile="/tmp/calvin500x.conf")
        rt1 = RT("http://%s:5003" % ip_addr)
        try:
            logfile = _config_pytest.getoption("logfile")+"5001"
            outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
            if outfile == logfile:
                outfile = None
        except:
            logfile = None
            outfile = None
        # Runtime 2 (node1) on port 5001, control API on 5004.
        csruntime(ip_addr, port=5001, controlport=5004, attr={'indexed_public':
                  {'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
                   'node_name': {'name': 'node1'},
                   'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
                  loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
                  configfile="/tmp/calvin500x.conf")
        rt2 = RT("http://%s:5004" % ip_addr)
        try:
            logfile = _config_pytest.getoption("logfile")+"5002"
            outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
            if outfile == logfile:
                outfile = None
        except:
            logfile = None
            outfile = None
        # Runtime 3 (node2) on port 5002, control API on 5005.
        csruntime(ip_addr, port=5002, controlport=5005, attr={'indexed_public':
                  {'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
                   'node_name': {'name': 'node2'},
                   'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
                  loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
                  configfile="/tmp/calvin500x.conf")
        rt3 = RT("http://%s:5005" % ip_addr)
        test_script_dir = absolute_filename('scripts/')
        # Ensure the runtimes are torn down after the class finishes.
        request.addfinalizer(self.teardown)
    def teardown(self):
        """Stop all three runtimes and make sure their processes are gone."""
        global rt1
        global rt2
        global rt3
        # Ask each runtime to shut down via its control API.
        request_handler.quit(rt1)
        request_handler.quit(rt2)
        request_handler.quit(rt3)
        time.sleep(0.2)
        # Terminate any child processes the fixture spawned.
        for p in multiprocessing.active_children():
            p.terminate()
        # They will die eventually (about 5 seconds) in most cases, but this makes sure without wasting time
        os.system("pkill -9 -f -l 'csruntime -n %s -p 5000'" % (ip_addr,))
        os.system("pkill -9 -f -l 'csruntime -n %s -p 5001'" % (ip_addr,))
        os.system("pkill -9 -f -l 'csruntime -n %s -p 5002'" % (ip_addr,))
        time.sleep(0.2)
    def verify_storage(self):
        """Wait for the three runtimes' control APIs and distributed storage
        to come up, then assert the index entries this test suite needs.

        Polls up to 30 times (0.1 s apart) first for node ids, then for the
        capability index to contain all three node ids. Fails the test via
        assert if either never converges.
        """
        global rt1
        global rt2
        global rt3
        global rt1_id
        global rt2_id
        global rt3_id
        rt1_id = None
        rt2_id = None
        rt3_id = None
        failed = True
        # Try 30 times waiting for control API to be up and running
        for i in range(30):
            try:
                # keep previously fetched ids; only re-query the missing ones
                rt1_id = rt1_id or request_handler.get_node_id(rt1)
                rt2_id = rt2_id or request_handler.get_node_id(rt2)
                rt3_id = rt3_id or request_handler.get_node_id(rt3)
                failed = False
                break
            except:
                # control API not answering yet; back off briefly and retry
                time.sleep(0.1)
        assert not failed
        assert rt1_id
        assert rt2_id
        assert rt3_id
        print "RUNTIMES:", rt1_id, rt2_id, rt3_id
        _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
        failed = True
        # Try 30 times waiting for storage to be connected
        caps1 = []
        caps2 = []
        caps3 = []
        rt_ids = set([rt1_id, rt2_id, rt3_id])
        for i in range(30):
            try:
                # only re-fetch an index view that does not yet list all nodes
                if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1):
                    caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
                if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2):
                    caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
                if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3):
                    caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
                # done when every runtime's view contains all three node ids
                if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3):
                    failed = False
                    break
                else:
                    time.sleep(0.1)
            except:
                time.sleep(0.1)
        assert not failed
        _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
        # Now check for the values needed by this specific test
        caps = request_handler.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
        assert rt1_id in caps['result']
        _log.analyze("TESTRUN", "+ RT1 CAPS", {})
        caps = request_handler.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
        # NOTE(review): this checks rt1_id (not rt2_id) against rt2's view of
        # the capability index — possibly intentional (cross-node visibility),
        # but looks like a copy-paste; confirm with the original author.
        assert rt1_id in caps['result']
        _log.analyze("TESTRUN", "+ RT2 CAPS", {})
        assert request_handler.get_index(rt1, format_index_string(['node_name', {'name': 'node2'}]))
        _log.analyze("TESTRUN", "+ RT1 INDEX", {})
        assert request_handler.get_index(rt2, format_index_string(['node_name', {'name': 'node1'}]))
        _log.analyze("TESTRUN", "+ RT2 INDEX", {})
    @pytest.mark.slow
    def testSecureDHTVerifyStorage(self):
        """Smoke test: the secure-DHT storage converges and indexes all nodes."""
        _log.analyze("TESTRUN", "+", {})
        self.verify_storage()
"""
@pytest.mark.slow
def testDeployStillShadow(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global rt2
global rt3
global rt1_id
global rt2_id
global rt3_id
global test_script_dir
self.verify_storage()
from calvin.Tools.cscontrol import control_deploy as deploy_app
from collections import namedtuple
DeployArgs = namedtuple('DeployArgs', ['node', 'attr', 'script','reqs', 'check'])
args = DeployArgs(node='http://%s:5004' % ip_addr,
script=open(test_script_dir+"test_shadow1.calvin"), attr=None,
reqs=None, check=False)
result = {}
try:
result = deploy_app(args)
except:
raise Exception("Failed deployment of app %s, no use to verify if requirements fulfilled" % args.script.name)
#print "RESULT:", result
assert result['requirements_fulfilled']
time.sleep(1)
request_handler.migrate(rt2, result['actor_map']['test_shadow1:snk'], rt1_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt2, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[1]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) == 0
request_handler.migrate(rt2, result['actor_map']['test_shadow1:src'], rt3_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt3, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[2]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) == 0
request_handler.migrate(rt3, result['actor_map']['test_shadow1:src'], rt1_id)
time.sleep(1)
actors = [request_handler.get_actors(rt1), request_handler.get_actors(rt2), request_handler.get_actors(rt3)]
# src -> rt1, sum -> rt2, snk -> rt1
assert result['actor_map']['test_shadow1:src'] in actors[0]
assert result['actor_map']['test_shadow1:sum'] in actors[1]
assert result['actor_map']['test_shadow1:snk'] in actors[0]
actual = request_handler.report(rt1, result['actor_map']['test_shadow1:snk'])
assert len(actual) > 3
request_handler.delete_application(rt2, result['application_id'])
@pytest.mark.slow
def testDeployFailReqs(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global rt2
global rt3
global rt1_id
global rt2_id
global rt3_id
global test_script_dir
self.verify_storage()
from calvin.Tools.cscontrol import control_deploy as deploy_app
from collections import namedtuple
DeployArgs = namedtuple('DeployArgs', ['node', 'attr', 'script','reqs', 'check'])
args = DeployArgs(node='http://%s:5004' % ip_addr,
script=open(test_script_dir+"test_shadow1.calvin"), attr=None,
reqs=test_script_dir+"test_shadow6.deployjson", check=False)
result = {}
try:
result = deploy_app(args)
except:
raise Exception("Failed deployment of app %s, no use to verify if requirements fulfilled" % args.script.name)
#print "RESULT:", result
time.sleep(1)
assert not result['requirements_fulfilled']
request_handler.delete_application(rt2, result['application_id'])
"""
|
|
from sys import getsizeof
import operator
import numpy as np
import pandas.index as _index
from pandas.types.common import (is_integer,
is_scalar,
is_int64_dtype)
from pandas import compat
from pandas.compat import lrange, range
from pandas.compat.numpy import function as nv
from pandas.indexes.base import Index, _index_shared_docs
from pandas.util.decorators import Appender, cache_readonly
import pandas.indexes.base as ibase
from pandas.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
    """
    Immutable Index implementing a monotonic range. RangeIndex is a
    memory-saving special case of Int64Index limited to representing
    monotonic ranges.
    Parameters
    ----------
    start : int (default: 0), or other RangeIndex instance.
        If int and "stop" is not given, interpreted as "stop" instead.
    stop : int (default: 0)
    step : int (default: 1)
    name : object, optional
        Name to be stored in the index
    copy : bool, default False
        Unused, accepted for homogeneity with other index types.
    """
    _typ = 'rangeindex'
    _engine_type = _index.Int64Engine
    def __new__(cls, start=None, stop=None, step=None, name=None, dtype=None,
                fastpath=False, copy=False, **kwargs):
        # fastpath trusts the caller completely and skips all validation
        if fastpath:
            return cls._simple_new(start, stop, step, name=name)
        cls._validate_dtype(dtype)
        # RangeIndex: copy-construct from another RangeIndex
        if isinstance(start, RangeIndex):
            if name is None:
                name = start.name
            return cls._simple_new(name=name,
                                   **dict(start._get_data_as_items()))
        # validate the arguments
        def _ensure_int(value, field):
            # coerce to int, rejecting non-scalars and lossy conversions
            msg = ("RangeIndex(...) must be called with integers,"
                   " {value} was passed for {field}")
            if not is_scalar(value):
                raise TypeError(msg.format(value=type(value).__name__,
                                           field=field))
            try:
                new_value = int(value)
                assert(new_value == value)
            except (TypeError, ValueError, AssertionError):
                raise TypeError(msg.format(value=type(value).__name__,
                                           field=field))
            return new_value
        if start is None and stop is None and step is None:
            msg = "RangeIndex(...) must be called with integers"
            raise TypeError(msg)
        elif start is None:
            start = 0
        else:
            start = _ensure_int(start, 'start')
        # mirror builtin range(): a single argument is the stop, not the start
        if stop is None:
            stop = start
            start = 0
        else:
            stop = _ensure_int(stop, 'stop')
        if step is None:
            step = 1
        elif step == 0:
            raise ValueError("Step must not be zero")
        else:
            step = _ensure_int(step, 'step')
        return cls._simple_new(start, stop, step, name)
    @classmethod
    def from_range(cls, data, name=None, dtype=None, **kwargs):
        """ create RangeIndex from a range (py3), or xrange (py2) object """
        if not isinstance(data, range):
            raise TypeError(
                '{0}(...) must be called with object coercible to a '
                'range, {1} was passed'.format(cls.__name__, repr(data)))
        if compat.PY3:
            step = data.step
            stop = data.stop
            start = data.start
        else:
            # seems we only have indexing ops to infer
            # rather than direct accessors
            if len(data) > 1:
                step = data[1] - data[0]
                stop = data[-1] + step
                start = data[0]
            elif len(data):
                start = data[0]
                stop = data[0] + 1
                step = 1
            else:
                start = stop = 0
                step = 1
        return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
    @classmethod
    def _simple_new(cls, start, stop=None, step=None, name=None,
                    dtype=None, **kwargs):
        # Fast constructor: assumes valid ints; falls back to the validating
        # __new__ (and ultimately to a plain Index) on anything else.
        result = object.__new__(cls)
        # handle passed None, non-integers
        if start is None and stop is None:
            # empty
            start, stop, step = 0, 0, 1
        if start is None or not is_integer(start):
            try:
                return RangeIndex(start, stop, step, name=name, **kwargs)
            except TypeError:
                return Index(start, stop, step, name=name, **kwargs)
        result._start = start
        result._stop = stop or 0
        result._step = step or 1
        result.name = name
        for k, v in compat.iteritems(kwargs):
            setattr(result, k, v)
        result._reset_identity()
        return result
    @staticmethod
    def _validate_dtype(dtype):
        """ require dtype to be None or int64 """
        if not (dtype is None or is_int64_dtype(dtype)):
            raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
    @cache_readonly
    def _constructor(self):
        """ return the class to use for construction """
        return Int64Index
    @cache_readonly
    def _data(self):
        # materialize the range as an int64 ndarray (cached, computed lazily)
        return np.arange(self._start, self._stop, self._step, dtype=np.int64)
    @cache_readonly
    def _int64index(self):
        # equivalent Int64Index, used as a fallback for ops RangeIndex
        # cannot represent
        return Int64Index(self._data, name=self.name, fastpath=True)
    def _get_data_as_items(self):
        """ return a list of tuples of start, stop, step """
        return [('start', self._start),
                ('stop', self._stop),
                ('step', self._step)]
    def __reduce__(self):
        # pickle support: rebuild from the attribute dict + range parameters
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (self.__class__, d), None
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(('name', ibase.default_pprint(self.name)))
        return attrs
    def _format_data(self):
        # we are formatting thru the attributes
        return None
    @cache_readonly
    def nbytes(self):
        """ return the number of bytes in the underlying data """
        return sum([getsizeof(getattr(self, v)) for v in
                    ['_start', '_stop', '_step']])
    def memory_usage(self, deep=False):
        """
        Memory usage of my values
        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption
        Returns
        -------
        bytes used
        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False
        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes
    @property
    def dtype(self):
        return np.dtype(np.int64)
    @property
    def is_unique(self):
        """ return if the index has unique values """
        return True
    @cache_readonly
    def is_monotonic_increasing(self):
        return self._step > 0 or len(self) <= 1
    @cache_readonly
    def is_monotonic_decreasing(self):
        return self._step < 0 or len(self) <= 1
    @property
    def has_duplicates(self):
        return False
    def tolist(self):
        # lrange builds an eager list on both py2 and py3
        return lrange(self._start, self._stop, self._step)
    @Appender(_index_shared_docs['_shallow_copy'])
    def _shallow_copy(self, values=None, **kwargs):
        if values is None:
            return RangeIndex(name=self.name, fastpath=True,
                              **dict(self._get_data_as_items()))
        else:
            # explicit values cannot generally be a range: degrade to
            # Int64Index
            kwargs.setdefault('name', self.name)
            return self._int64index._shallow_copy(values, **kwargs)
    @Appender(ibase._index_shared_docs['copy'])
    def copy(self, name=None, deep=False, dtype=None, **kwargs):
        self._validate_dtype(dtype)
        if name is None:
            name = self.name
        return RangeIndex(name=name, fastpath=True,
                          **dict(self._get_data_as_items()))
    def argsort(self, *args, **kwargs):
        """
        Returns the indices that would sort the index and its
        underlying data.
        Returns
        -------
        argsorted : numpy array
        See also
        --------
        numpy.ndarray.argsort
        """
        nv.validate_argsort(args, kwargs)
        # a range is already sorted (ascending or descending), so the
        # answer is simply the identity or the reversed identity
        if self._step > 0:
            return np.arange(len(self))
        else:
            return np.arange(len(self) - 1, -1, -1)
    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            ls = len(self)
            lo = len(other)
            # relies on 'and' binding tighter than 'or': empty == empty,
            # singletons need only matching start, otherwise start and step
            # must both match
            return (ls == lo == 0 or
                    ls == lo == 1 and
                    self._start == other._start or
                    ls == lo and
                    self._start == other._start and
                    self._step == other._step)
        return super(RangeIndex, self).equals(other)
    def intersection(self, other):
        """
        Form the intersection of two Index objects. Sortedness of the result is
        not guaranteed
        Parameters
        ----------
        other : Index or array-like
        Returns
        -------
        intersection : Index
        """
        if not isinstance(other, RangeIndex):
            return super(RangeIndex, self).intersection(other)
        if not len(self) or not len(other):
            return RangeIndex._simple_new(None)
        # check whether intervals intersect
        # deals with in- and decreasing ranges
        int_low = max(min(self._start, self._stop + 1),
                      min(other._start, other._stop + 1))
        int_high = min(max(self._stop, self._start + 1),
                       max(other._stop, other._start + 1))
        if int_high <= int_low:
            return RangeIndex._simple_new(None)
        # Method hint: linear Diophantine equation
        # solve intersection problem
        # performance hint: for identical step sizes, could use
        # cheaper alternative
        gcd, s, t = self._extended_gcd(self._step, other._step)
        # check whether element sets intersect
        if (self._start - other._start) % gcd:
            return RangeIndex._simple_new(None)
        # calculate parameters for the RangeIndex describing the
        # intersection disregarding the lower bounds
        tmp_start = self._start + (other._start - self._start) * \
            self._step // gcd * s
        new_step = self._step * other._step // gcd
        new_index = RangeIndex(tmp_start, int_high, new_step, fastpath=True)
        # adjust index to limiting interval
        new_index._start = new_index._min_fitting_element(int_low)
        return new_index
    def _min_fitting_element(self, lower_limit):
        """Returns the smallest element greater than or equal to the limit"""
        # ceiling division via double negation
        no_steps = -(-(lower_limit - self._start) // abs(self._step))
        return self._start + abs(self._step) * no_steps
    def _max_fitting_element(self, upper_limit):
        """Returns the largest element smaller than or equal to the limit"""
        no_steps = (upper_limit - self._start) // abs(self._step)
        return self._start + abs(self._step) * no_steps
    def _extended_gcd(self, a, b):
        """
        Extended Euclidean algorithms to solve Bezout's identity:
           a*x + b*y = gcd(x, y)
        Finds one particular solution for x, y: s, t
        Returns: gcd, s, t
        """
        s, old_s = 0, 1
        t, old_t = 1, 0
        r, old_r = b, a
        while r:
            quotient = old_r // r
            old_r, r = r, old_r - quotient * r
            old_s, s = s, old_s - quotient * s
            old_t, t = t, old_t - quotient * t
        return old_r, old_s, old_t
    def union(self, other):
        """
        Form the union of two Index objects and sorts if possible
        Parameters
        ----------
        other : Index or array-like
        Returns
        -------
        union : Index
        """
        self._assert_can_do_setop(other)
        if len(other) == 0 or self.equals(other):
            return self
        if len(self) == 0:
            return other
        if isinstance(other, RangeIndex):
            # normalize both ranges to ascending orientation, then try to
            # express the union as a single range; fall through to
            # Int64Index.union if the element sets cannot be so expressed
            start_s, step_s = self._start, self._step
            end_s = self._start + self._step * (len(self) - 1)
            start_o, step_o = other._start, other._step
            end_o = other._start + other._step * (len(other) - 1)
            if self._step < 0:
                start_s, step_s, end_s = end_s, -step_s, start_s
            if other._step < 0:
                start_o, step_o, end_o = end_o, -step_o, start_o
            if len(self) == 1 and len(other) == 1:
                step_s = step_o = abs(self._start - other._start)
            elif len(self) == 1:
                step_s = step_o
            elif len(other) == 1:
                step_o = step_s
            start_r = min(start_s, start_o)
            end_r = max(end_s, end_o)
            if step_o == step_s:
                if ((start_s - start_o) % step_s == 0 and
                        (start_s - end_o) <= step_s and
                        (start_o - end_s) <= step_s):
                    return RangeIndex(start_r, end_r + step_s, step_s)
                if ((step_s % 2 == 0) and
                        (abs(start_s - start_o) <= step_s / 2) and
                        (abs(end_s - end_o) <= step_s / 2)):
                    return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
            elif step_o % step_s == 0:
                if ((start_o - start_s) % step_s == 0 and
                        (start_o + step_s >= start_s) and
                        (end_o - step_s <= end_s)):
                    return RangeIndex(start_r, end_r + step_s, step_s)
            elif step_s % step_o == 0:
                if ((start_s - start_o) % step_o == 0 and
                        (start_s + step_o >= start_o) and
                        (end_s - step_o <= end_o)):
                    return RangeIndex(start_r, end_r + step_o, step_o)
        return self._int64index.union(other)
    def join(self, other, how='left', level=None, return_indexers=False):
        """
        *this is an internal non-public method*
        Compute join_index and indexers to conform data
        structures to the new index.
        Parameters
        ----------
        other : Index
        how : {'left', 'right', 'inner', 'outer'}
        level : int or level name, default None
        return_indexers : boolean, default False
        Returns
        -------
        join_index, (left_indexer, right_indexer)
        """
        if how == 'outer' and self is not other:
            # note: could return RangeIndex in more circumstances
            return self._int64index.join(other, how, level, return_indexers)
        return super(RangeIndex, self).join(other, how, level, return_indexers)
    def __len__(self):
        """
        return the length of the RangeIndex
        """
        # ceiling division (works for negative steps too); clamp at 0
        return max(0, -(-(self._stop - self._start) // self._step))
    @property
    def size(self):
        return len(self)
    def __getitem__(self, key):
        """
        Conserve RangeIndex type for scalar and slice keys.
        """
        super_getitem = super(RangeIndex, self).__getitem__
        if is_scalar(key):
            n = int(key)
            if n != key:
                return super_getitem(key)
            if n < 0:
                n = len(self) + key
            if n < 0 or n > len(self) - 1:
                raise IndexError("index {key} is out of bounds for axis 0 "
                                 "with size {size}".format(key=key,
                                                           size=len(self)))
            return self._start + n * self._step
        if isinstance(key, slice):
            # This is basically PySlice_GetIndicesEx, but delegation to our
            # super routines if we don't have integers
            l = len(self)
            # complete missing slice information
            step = 1 if key.step is None else key.step
            if key.start is None:
                start = l - 1 if step < 0 else 0
            else:
                start = key.start
                if start < 0:
                    start += l
                if start < 0:
                    start = -1 if step < 0 else 0
                if start >= l:
                    start = l - 1 if step < 0 else l
            if key.stop is None:
                stop = -1 if step < 0 else l
            else:
                stop = key.stop
                if stop < 0:
                    stop += l
                if stop < 0:
                    stop = -1
                if stop > l:
                    stop = l
            # delegate non-integer slices
            if (start != int(start) or
                    stop != int(stop) or
                    step != int(step)):
                return super_getitem(key)
            # convert indexes to values
            start = self._start + self._step * start
            stop = self._start + self._step * stop
            step = self._step * step
            return RangeIndex(start, stop, step, self.name, fastpath=True)
        # fall back to Int64Index
        return super_getitem(key)
    def __floordiv__(self, other):
        if is_integer(other):
            # result stays a RangeIndex only if start and step divide evenly
            if (len(self) == 0 or
                    self._start % other == 0 and
                    self._step % other == 0):
                start = self._start // other
                step = self._step // other
                stop = start + len(self) * step
                return RangeIndex(start, stop, step, name=self.name,
                                  fastpath=True)
            if len(self) == 1:
                start = self._start // other
                return RangeIndex(start, start + 1, 1, name=self.name,
                                  fastpath=True)
        return self._int64index // other
    @classmethod
    def _add_numeric_methods_binary(cls):
        """ add in numeric methods, specialized to RangeIndex """
        def _make_evaluate_binop(op, opstr, reversed=False, step=False):
            """
            Parameters
            ----------
            op : callable that accepts 2 parms
                perform the binary op
            opstr : string
                string name of ops
            reversed : boolean, default False
                if this is a reversed op, e.g. radd
            step : callable, optional, default to False
                op to apply to the step parm if not None
                if False, use the existing step
            """
            def _evaluate_numeric_binop(self, other):
                other = self._validate_for_numeric_binop(other, op, opstr)
                attrs = self._get_attributes_dict()
                attrs = self._maybe_update_attributes(attrs)
                if reversed:
                    self, other = other, self
                try:
                    # apply if we have an override
                    if step:
                        with np.errstate(all='ignore'):
                            rstep = step(self._step, other)
                        # we don't have a representable op
                        # so return a base index
                        if not is_integer(rstep) or not rstep:
                            raise ValueError
                    else:
                        rstep = self._step
                    with np.errstate(all='ignore'):
                        rstart = op(self._start, other)
                        rstop = op(self._stop, other)
                    result = RangeIndex(rstart,
                                        rstop,
                                        rstep,
                                        **attrs)
                    # for compat with numpy / Int64Index
                    # even if we can represent as a RangeIndex, return
                    # as a Float64Index if we have float-like descriptors
                    if not all([is_integer(x) for x in
                                [rstart, rstop, rstep]]):
                        result = result.astype('float64')
                    return result
                except (ValueError, TypeError, AttributeError):
                    pass
                # convert to Int64Index ops
                if isinstance(self, RangeIndex):
                    self = self.values
                if isinstance(other, RangeIndex):
                    other = other.values
                with np.errstate(all='ignore'):
                    results = op(self, other)
                return Index(results, **attrs)
            return _evaluate_numeric_binop
        cls.__add__ = cls.__radd__ = _make_evaluate_binop(
            operator.add, '__add__')
        cls.__sub__ = _make_evaluate_binop(operator.sub, '__sub__')
        cls.__rsub__ = _make_evaluate_binop(
            operator.sub, '__sub__', reversed=True)
        cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
            operator.mul,
            '__mul__',
            step=operator.mul)
        cls.__truediv__ = _make_evaluate_binop(
            operator.truediv,
            '__truediv__',
            step=operator.truediv)
        cls.__rtruediv__ = _make_evaluate_binop(
            operator.truediv,
            '__truediv__',
            reversed=True,
            step=operator.truediv)
        if not compat.PY3:
            cls.__div__ = _make_evaluate_binop(
                operator.div,
                '__div__',
                step=operator.div)
            cls.__rdiv__ = _make_evaluate_binop(
                operator.div,
                '__div__',
                reversed=True,
                step=operator.div)
# Attach the standard Index numeric/logical method machinery; the numeric
# binops were specialized above in _add_numeric_methods_binary.
RangeIndex._add_numeric_methods()
RangeIndex._add_logical_methods()
|
|
"""
**hep_ml.commonutils** contains some helpful functions and classes
which are often used (by other modules)
"""
from __future__ import print_function, division, absolute_import
from multiprocessing.pool import ThreadPool
import numbers
import itertools
import numpy
import pandas
from scipy.special import expit
try:
import sklearn.model_selection as sklearn_cross_validation
except ImportError:
# The cross_validation module is deprecated as of sklearn 0.18, in favour
# of the model_selection module
import sklearn.cross_validation as sklearn_cross_validation
from sklearn.neighbors.unsupervised import NearestNeighbors
__author__ = "Alex Rogozhnikov"
def _threads_wrapper(func_and_args):
func = func_and_args[0]
args = func_and_args[1:]
return func(*args)
def map_on_cluster(parallel_profile, *args, **kw_args):
    """
    The same as map, but the first argument is ipc_profile. Distributes the task over IPython cluster.
    :param parallel_profile: the IPython cluster profile to use.
    :type parallel_profile: None or str
    :param list args: function, arguments
    :param dict kw_args: kwargs for LoadBalacedView.map_sync
    (function copied from REP)
    :return: the result of mapping
    """
    if parallel_profile is None:
        # no profile: plain local map
        return map(*args)
    if parallel_profile.startswith('threads-'):
        # "threads-N" profile: run on a local thread pool of N workers
        n_threads = int(parallel_profile[len('threads-'):])
        pool = ThreadPool(processes=n_threads)
        func, params = args[0], args[1:]
        jobs = zip(itertools.cycle([func]), *params)
        return pool.map(_threads_wrapper, jobs)
    # otherwise treat it as an IPython cluster profile name
    from IPython.parallel import Client
    view = Client(profile=parallel_profile).load_balanced_view()
    return view.map_sync(*args, **kw_args)
def sigmoid_function(x, width):
    """ Sigmoid function is smoothing of Heaviside function,
    the less width, the closer we are to Heaviside function
    :type x: array-like with floats, arbitrary shape
    :type width: float, if width == 0, this is simply Heaviside function
    """
    assert width >= 0, 'the width should be non-negative'
    if width <= 0.0001:
        # (effectively) zero width: hard threshold at x = 0
        return (x > 0) * 1.0
    return expit(x / width)
def generate_sample(n_samples, n_features, distance=2.0):
    """Generates some test distribution,
    signal and background distributions are gaussian with same dispersion and different centers,
    all variables are independent (gaussian correlation matrix is identity).
    This function is frequently used in tests. """
    from sklearn.datasets import make_blobs
    # two class centers placed symmetrically around the origin
    half_distance = distance / 2
    centers = numpy.zeros((2, n_features))
    centers[0, :] = -half_distance
    centers[1, :] = half_distance
    X, y = make_blobs(n_samples=n_samples, n_features=n_features, centers=centers)
    column_names = ["column" + str(i) for i in range(n_features)]
    return pandas.DataFrame(X, columns=column_names), y
def check_uniform_label(uniform_label):
    """Convert uniform label to numpy.array
    :param uniform_label: label or list of labels (examples: 0, 1, [0], [1], [0, 1])
    :return: numpy.array (with [0], [1] or [0, 1])
    """
    # wrap a bare scalar into a one-element list before conversion
    if not isinstance(uniform_label, numbers.Number):
        return numpy.array(uniform_label)
    return numpy.array([uniform_label])
def train_test_split(*arrays, **kw_args):
    """Does the same thing as train_test_split, but preserves columns in DataFrames.
    Uses the same parameters: test_size, train_size, random_state, and has the same interface
    :type list[numpy.array|pandas.DataFrame] arrays: arrays to split
    """
    assert len(arrays) > 0, "at least one array should be passed"
    n_samples = len(arrays[0])
    for data in arrays:
        assert len(data) == n_samples, "different size"
    # split positions once, then apply the same split to every array
    train_indices, test_indices = sklearn_cross_validation.train_test_split(range(n_samples), **kw_args)
    result = []
    for data in arrays:
        if isinstance(data, pandas.DataFrame):
            # .iloc keeps the DataFrame's column names intact
            result.extend([data.iloc[train_indices, :], data.iloc[test_indices, :]])
        else:
            result.extend([data[train_indices], data[test_indices]])
    return result
def weighted_quantile(array, quantiles, sample_weight=None, array_sorted=False, old_style=False):
    """ Very close to numpy.percentile, but supports weights.
    NOTE: quantiles should be in [0, 1]!
    :param array: numpy.array with data
    :param quantiles: array-like with many percentiles
    :param sample_weight: array-like of the same length as `array`
    :param array_sorted: bool, if True, then will avoid sorting
    :param old_style: if True, will correct output to be consistent with numpy.percentile.
    :return: numpy.array with computed percentiles.
    """
    array = numpy.array(array)
    quantiles = numpy.array(quantiles)
    sample_weight = check_sample_weight(array, sample_weight)
    assert numpy.all(quantiles >= 0) and numpy.all(quantiles <= 1), 'Percentiles should be in [0, 1]'
    if not array_sorted:
        order = numpy.argsort(array)
        array, sample_weight = array[order], sample_weight[order]
    # quantile position of each sample: cumulative weight up to the sample's
    # midpoint
    positions = numpy.cumsum(sample_weight) - 0.5 * sample_weight
    if old_style:
        # rescale to [0, 1] exactly, matching numpy.percentile conventions
        positions -= positions[0]
        positions /= positions[-1]
    else:
        positions /= numpy.sum(sample_weight)
    return numpy.interp(quantiles, positions, array)
def build_normalizer(signal, sample_weight=None):
    """Prepares normalization function for some set of values
    transforms it to uniform distribution from [0, 1]. Example of usage:
    >>>normalizer = build_normalizer(signal)
    >>>pylab.hist(normalizer(background))
    >>># this one should be uniform in [0,1]
    >>>pylab.hist(normalizer(signal))
    :param numpy.array signal: shape = [n_samples] with floats
    :param numpy.array sample_weight: shape = [n_samples], non-negative weights associated to events.
    """
    sample_weight = check_sample_weight(signal, sample_weight)
    assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
    order = numpy.argsort(signal)
    signal, sample_weight = signal[order], sample_weight[order]
    # weighted empirical CDF evaluated at each (sorted) signal value
    cdf_values = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
    def normalizing_function(data):
        # piecewise-linear interpolation of the empirical CDF
        return numpy.interp(data, signal, cdf_values)
    return normalizing_function
def compute_cut_for_efficiency(efficiency, mask, y_pred, sample_weight=None):
    """ Computes such cut(s), that provide given target global efficiency(ies).
    Example:
    >>> p = classifier.predict_proba(X)
    >>> threshold = compute_cut_for_efficiency(0.5, mask=y == 1, y_pred=p[:, 1])
    :type efficiency: float or numpy.array with target efficiencies, shape = [n_effs]
    :type mask: array-like, shape = [n_samples], True for needed samples
    :type y_pred: array-like, shape = [n_samples], predictions or scores (float)
    :type sample_weight: None | array-like, shape = [n_samples]
    :return: float or numpy.array, shape = [n_effs]
    """
    sample_weight = check_sample_weight(mask, sample_weight)
    assert len(mask) == len(y_pred), 'lengths are different'
    efficiency = numpy.array(efficiency)
    # only the selected (signal) part defines the cut
    is_signal = mask > 0.5
    signal_scores = y_pred[is_signal]
    signal_weights = sample_weight[is_signal]
    # efficiency e corresponds to the (1 - e) weighted quantile of the scores
    return weighted_quantile(signal_scores, 1. - efficiency, sample_weight=signal_weights)
# region Knn-related functions
def compute_knn_indices_of_signal(X, is_signal, n_neighbours=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.
    :type X: numpy.array, shape = [n_samples, n_features] the distance is measured over these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(X) == len(is_signal), "Different lengths"
    is_signal = numpy.array(is_signal)
    # global indices of the signal events
    signal_positions = numpy.where(is_signal)[0]
    # fit the neighbour search on the signal subset only, query all events
    knn = NearestNeighbors(n_neighbors=n_neighbours, algorithm='kd_tree')
    knn.fit(numpy.array(X)[is_signal])
    _, neighbour_ranks = knn.kneighbors(X)
    # translate positions within the signal subset back to global indices
    return numpy.take(signal_positions, neighbour_ranks)
def compute_knn_indices_of_same_class(X, y, n_neighbours=50):
    """
    For each event returns the knn closest events of the same class.
    :type X: numpy.array, the distance is measured over these variables
    :type y: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(X) == len(y), "different size"
    # numpy.int was removed in NumPy 1.24; the alias always meant the builtin
    # int, so this is behaviour-preserving on older NumPy as well.
    result = numpy.zeros([len(X), n_neighbours], dtype=int)
    for label in set(y):
        is_signal = y == label
        # neighbours computed within this class only
        label_knn = compute_knn_indices_of_signal(X, is_signal, n_neighbours)
        result[is_signal, :] = label_knn[is_signal, :]
    return result
# endregion
def indices_of_values(array):
    """For each value in array returns indices with this value
    :param array: numpy.array with 1-dimensional initial data
    :return: sequence of tuples (value, indices_with_this_value), sequence is ordered by value
    """
    order = numpy.argsort(array)
    sorted_values = array[order]
    # positions where the sorted value changes mark group boundaries
    change_points = numpy.nonzero(numpy.ediff1d(sorted_values))[0] + 1
    boundaries = [0] + list(change_points) + [len(array)]
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        yield sorted_values[start], order[start:stop]
def take_features(X, features):
    """
    Takes features from dataset.
    NOTE: may return view to original data!
    :param X: numpy.array or pandas.DataFrame
    :param features: list of strings (if pandas.DataFrame) or list of ints
    :return: pandas.DataFrame or numpy.array with the same length.
    """
    from numbers import Number
    are_strings = all([isinstance(feature, str) for feature in features])
    are_numbers = all([isinstance(feature, Number) for feature in features])
    if are_strings and isinstance(X, pandas.DataFrame):
        # DataFrame.ix was removed from pandas (deprecated 0.20, gone in 1.0);
        # label-based column selection is exactly .loc.
        return X.loc[:, features]
    elif are_numbers:
        return numpy.array(X)[:, features]
    else:
        raise NotImplementedError("Can't take features {} from object of type {}".format(features, type(X)))
def check_sample_weight(y_true, sample_weight, normalize=False, normalize_by_class=False):
    """Checks the weights, returns normalized version
    :param y_true: numpy.array of shape [n_samples]
    :param sample_weight: array-like of shape [n_samples] or None
    :param normalize: bool, if True, will scale everything to mean = 1.
    :param normalize_by_class: bool, if set, will set equal weight = 1 for each value of y_true.
        Better to use normalize if normalize_by_class is used.
    :returns: numpy.array with weights of shape [n_samples]"""
    if sample_weight is None:
        # numpy.float was removed in NumPy 1.24; the alias always meant the
        # builtin float, so this is behaviour-preserving on older NumPy too.
        sample_weight = numpy.ones(len(y_true), dtype=float)
    else:
        sample_weight = numpy.array(sample_weight, dtype=float)
        assert numpy.ndim(sample_weight) == 1, 'weights vector should be 1-dimensional'
        assert len(y_true) == len(sample_weight), \
            "The length of weights is different: not {0}, but {1}".format(len(y_true), len(sample_weight))
    if normalize_by_class:
        # copy before in-place per-class rescaling so the caller's array is
        # not modified
        sample_weight = numpy.copy(sample_weight)
        for value in numpy.unique(y_true):
            sample_weight[y_true == value] /= numpy.sum(sample_weight[y_true == value])
    if normalize:
        sample_weight = sample_weight / numpy.mean(sample_weight)
    return sample_weight
def check_xyw(X, y, sample_weight=None, classification=False, allow_multiple_outputs=False):
    """Checks parameters of classifier / loss / metrics.
    :param X: array-like of shape [n_samples, n_features] (numpy.array or pandas.DataFrame)
    :param y: array-like of shape [n_samples]
    :param sample_weight: None or array-like of shape [n_samples]
    :param classification: bool, if True, y is cast to integer labels
    :param allow_multiple_outputs: bool, if True, y may have more than one dimension
    :return: normalized 3-tuple (X, y, sample_weight)
    """
    y = numpy.array(y)
    if not allow_multiple_outputs:
        assert numpy.ndim(y) == 1, 'y should be one-dimensional'
    sample_weight = check_sample_weight(y, sample_weight=sample_weight)
    # Coerce anything that is neither a DataFrame nor an ndarray.
    # No checks on sparsity here.
    if not isinstance(X, (pandas.DataFrame, numpy.ndarray)):
        X = numpy.array(X)
    if classification:
        y = numpy.array(y, dtype=int)
    n_samples, n_targets = len(X), len(y)
    assert n_samples == n_targets, 'lengths are different: {} and {}'.format(n_samples, n_targets)
    assert numpy.ndim(X) == 2, 'X should have 2 dimensions'
    return X, y, sample_weight
def score_to_proba(score):
    """Compute class probability estimates from decision scores.
    Uses logistic function.
    :param score: numpy.array of shape [n_samples]
    :return: probabilities, numpy.array of shape [n_samples, 2]
    """
    # `numpy.float` was removed in NumPy 1.24; builtin `float` is float64.
    proba = numpy.zeros((score.shape[0], 2), dtype=float)
    # column 1 = sigmoid(score), column 0 = its complement, rows sum to 1
    proba[:, 1] = expit(score)
    proba[:, 0] = 1.0 - proba[:, 1]
    return proba
def take_last(sequence):
    """
    Returns the last element in sequence or raises an error
    """
    # use a sentinel object rather than a boolean flag to detect emptiness
    _missing = object()
    last = _missing
    for item in sequence:
        last = item
    if last is _missing:
        raise IndexError('The sequence is empty.')
    return last
def to_pandas_dataframe(X):
    """
    Convert 2-dimensional array to DataFrame. If input was a DataFrame, returns itself.
    """
    if isinstance(X, pandas.DataFrame):
        return X
    # synthesize column names Feature0, Feature1, ... for the new frame
    n_columns = X.shape[1]
    column_names = ['Feature{}'.format(index) for index in range(n_columns)]
    return pandas.DataFrame(X, columns=column_names)
|
|
#!/usr/bin/env python
"""HTTP API logic that ties API call renderers with HTTP routes."""
import json
from django import http
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing
import logging
from grr.gui import api_call_renderers
from grr.lib import access_control
from grr.lib import rdfvalue
from grr.lib import registry
def BuildToken(request, execution_time):
  """Build an ACLToken from the request.

  Args:
    request: django HttpRequest to extract user, reason and source IPs from.
    execution_time: duration added to now() to compute the token expiry.

  Returns:
    An access_control.ACLToken for the requesting user.
  """
  # Default to an empty reason so methods other than GET/POST (e.g. HEAD)
  # don't raise a NameError on the unbound variable below.
  reason = ""
  if request.method == "GET":
    reason = request.GET.get("reason", "")
  elif request.method == "POST":
    reason = request.POST.get("reason", "")
  token = access_control.ACLToken(
      username=request.user,
      reason=reason,
      process="GRRAdminUI",
      expiry=rdfvalue.RDFDatetime().Now() + execution_time)
  # Record both the direct peer address and any proxy-forwarded address.
  for field in ["REMOTE_ADDR", "HTTP_X_FORWARDED_FOR"]:
    remote_addr = request.META.get(field, "")
    if remote_addr:
      token.source_ips.append(remote_addr)
  return token
HTTP_ROUTING_MAP = routing.Map()
def RegisterHttpRouteHandler(method, route, renderer_cls):
  """Registers given ApiCallRenderer for given method and route."""
  # The renderer class itself is used as the routing endpoint.
  rule = routing.Rule(route, methods=[method], endpoint=renderer_cls)
  HTTP_ROUTING_MAP.add(rule)
def GetRendererForHttpRequest(request):
  """Returns a renderer to handle given HTTP request."""
  server_name = request.environ["SERVER_NAME"]
  server_port = request.environ["SERVER_PORT"]
  matcher = HTTP_ROUTING_MAP.bind("%s:%s" % (server_name, server_port))
  try:
    renderer_cls, route_args = matcher.match(request.path, request.method)
  except werkzeug_exceptions.NotFound:
    raise api_call_renderers.ApiCallRendererNotFoundError(
        "No API renderer was found for (%s) %s" % (request.path,
                                                   request.method))
  # Instantiate the matched renderer class; callers get (renderer, args).
  return (renderer_cls(), route_args)
def FillAdditionalArgsFromRequest(request, supported_types):
  """Creates arguments objects from a given request dictionary.

  Args:
    request: dict of "TypeName.attribute" -> value entries.
    supported_types: dict mapping type names to argument classes.

  Returns:
    A list of rdfvalue.ApiCallAdditionalArgs, one per referenced type.
  """
  results = {}
  for key, value in request.items():
    try:
      request_arg_type, request_attr = key.split(".", 1)
    except ValueError:
      # Keys without a "type.attribute" structure are ignored.
      continue
    # Direct dict lookup replaces the original linear scan, which also
    # shadowed the outer loop variable `key`.
    arg_class = supported_types.get(request_arg_type)
    if arg_class:
      if request_arg_type not in results:
        results[request_arg_type] = arg_class()
      results[request_arg_type].Set(request_attr, value)
  results_list = []
  for name, arg_obj in results.items():
    additional_args = rdfvalue.ApiCallAdditionalArgs(
        name=name, type=supported_types[name].__name__)
    additional_args.args = arg_obj
    results_list.append(additional_args)
  return results_list
class JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):
  """Custom JSON encoder that encodes renderers output.

  Custom encoder is required to facilitate usage of primitive values -
  booleans, integers and strings - in renderers responses.

  If a renderer references an RDFString, RDFInteger or an RDFBool when building
  a response, it will lead to a JSON encoding failure when the response is
  encoded, unless this custom encoder is used. Another way to solve this issue
  would be to explicitly call api_value_renderers.RenderValue on every value
  returned from the renderer, but it would make the code overly verbose.
  """

  def default(self, obj):
    # Primitive RDF values serialize to their datastore representation;
    # everything else falls through to the standard encoder.
    primitive_rdf_types = (rdfvalue.RDFInteger,
                           rdfvalue.RDFBool,
                           rdfvalue.RDFString)
    if isinstance(obj, primitive_rdf_types):
      return obj.SerializeToDataStore()
    return json.JSONEncoder.default(self, obj)
def BuildResponse(status, rendered_data):
  """Builds HTTPResponse object from rendered data and HTTP status."""
  body = json.dumps(rendered_data,
                    cls=JSONEncoderWithRDFPrimitivesSupport)
  response = http.HttpResponse(status=status, content_type="application/json")
  # The leading ")]}'" line guards against XSSI attacks.
  response.write(")]}'\n")
  response.write(body)
  return response
def RenderHttpResponse(request):
  """Handles given HTTP request with one of the available API renderers."""
  renderer, route_args = GetRendererForHttpRequest(request)
  if request.method == "GET":
    if renderer.args_type:
      unprocessed_request = request.GET
      # Django QueryDict exposes dict(); normalize to a plain dict.
      if hasattr(unprocessed_request, "dict"):
        unprocessed_request = unprocessed_request.dict()
      args = renderer.args_type()
      for type_info in args.type_infos:
        # Route arguments take precedence over query parameters.
        if type_info.name in route_args:
          args.Set(type_info.name, route_args[type_info.name])
        elif type_info.name in unprocessed_request:
          args.Set(type_info.name, unprocessed_request[type_info.name])
      if renderer.additional_args_types:
        if not hasattr(args, "additional_args"):
          raise RuntimeError("Renderer %s defines additional arguments types "
                             "but its arguments object does not have "
                             "'additional_args' field." % renderer)
        # additional_args_types may be either a dict or a callable returning one.
        if hasattr(renderer.additional_args_types, "__call__"):
          additional_args_types = renderer.additional_args_types()
        else:
          additional_args_types = renderer.additional_args_types
        args.additional_args = FillAdditionalArgsFromRequest(
            unprocessed_request, additional_args_types)
    else:
      args = None
  elif request.method == "POST":
    try:
      # POST arguments arrive as a JSON body mapping directly to args_type.
      payload = json.loads(request.body)
      args = renderer.args_type(**payload)
    except Exception as e:  # pylint: disable=broad-except
      # A malformed payload is reported back as a 500 with the error message.
      response = http.HttpResponse(status=500)
      response.write(")]}'\n")  # XSSI protection
      response.write(json.dumps(dict(message=str(e))))
      logging.exception(
          "Error while parsing POST request %s (%s): %s",
          request.path, request.method, e)
      return response
  else:
    raise RuntimeError("Unsupported method: %s." % request.method)
  token = BuildToken(request, renderer.max_execution_time)
  try:
    rendered_data = api_call_renderers.HandleApiCall(renderer, args,
                                                     token=token)
    return BuildResponse(200, rendered_data)
  except Exception as e:  # pylint: disable=broad-except
    logging.exception(
        "Error while processing %s (%s) with %s: %s", request.path,
        request.method, renderer.__class__.__name__, e)
    return BuildResponse(500, dict(message=str(e)))
class HttpApiInitHook(registry.InitHook):
  """Register HTTP API renderers."""

  def RunOnce(self):
    # Doing late import to avoid circular dependency (http_api.py is referenced
    # by api_plugins/docs.py).
    #
    # pylint: disable=g-import-not-at-top
    from grr.gui import api_plugins
    # pylint: enable=g-import-not-at-top

    # Table of (method, route, renderer) triples, alphabetized by route.
    # Registration order is identical to the former explicit call sequence.
    route_table = [
        ("GET", "/api/aff4/<path:aff4_path>",
         api_plugins.aff4.ApiAff4Renderer),
        ("GET", "/api/aff4-index/<path:aff4_path>",
         api_plugins.aff4.ApiAff4IndexRenderer),
        ("GET", "/api/artifacts",
         api_plugins.artifact.ApiArtifactRenderer),
        ("GET", "/api/clients",
         api_plugins.client.ApiClientSearchRenderer),
        ("GET", "/api/clients/<client_id>",
         api_plugins.client.ApiClientSummaryRenderer),
        ("GET", "/api/clients/labels",
         api_plugins.client.ApiClientsLabelsListRenderer),
        ("POST", "/api/clients/labels/add",
         api_plugins.client.ApiClientsAddLabelsRenderer),
        ("POST", "/api/clients/labels/remove",
         api_plugins.client.ApiClientsRemoveLabelsRenderer),
        ("GET", "/api/config",
         api_plugins.config.ApiConfigRenderer),
        ("GET", "/api/docs",
         api_plugins.docs.ApiDocsRenderer),
        ("GET", "/api/flows/<client_id>/<flow_id>/status",
         api_plugins.client.ApiFlowStatusRenderer),
        ("GET", "/api/hunts",
         api_plugins.hunt.ApiHuntsListRenderer),
        ("GET", "/api/hunts/<hunt_id>",
         api_plugins.hunt.ApiHuntSummaryRenderer),
        ("GET", "/api/hunts/<hunt_id>/errors",
         api_plugins.hunt.ApiHuntErrorsRenderer),
        ("GET", "/api/hunts/<hunt_id>/log",
         api_plugins.hunt.ApiHuntLogRenderer),
        ("GET", "/api/reflection/rdfvalue/<type>",
         api_plugins.reflection.ApiRDFValueReflectionRenderer),
        ("GET", "/api/reflection/rdfvalue/all",
         api_plugins.reflection.ApiAllRDFValuesReflectionRenderer),
        ("GET", "/api/stats/store/<component>/metadata",
         api_plugins.stats.ApiStatsStoreMetricsMetadataRenderer),
        ("GET", "/api/stats/store/<component>/metrics/<metric_name>",
         api_plugins.stats.ApiStatsStoreMetricRenderer),
        ("GET", "/api/users/me/settings",
         api_plugins.user.ApiUserSettingsRenderer),
        ("POST", "/api/users/me/settings",
         api_plugins.user.ApiSetUserSettingsRenderer),
    ]
    for method, route, renderer_cls in route_table:
      RegisterHttpRouteHandler(method, route, renderer_cls)
|
|
# -*- coding: utf-8 -*-
"""
:mod:`Solver` -- ODE Solvers
============================
.. module :: solver
.. moduleauthor :: Olivier Verdier <olivier.verdier@gmail.com>
The class :class:`~odelab.solver.Solver` takes care of calling the numerical scheme to produce data, and of storing that data.
The higher level class is :class:`Solver`, which is initialized with an instance of a :class:`Scheme` class.
"""
from __future__ import division
import numpy as np
import itertools
import time
from odelab.plotter import Plotter
import warnings
from .store import Store, SimpleStore
from contextlib import contextmanager
class Solver (object):
	"""
	General Solver class, that takes care of calling the step function and storing the intermediate results.
	"""
	def __init__(self, scheme, system, path=None, init_scheme=None):
		"""
:Parameters:
	system : :class:`odelab.system.System`
		Object describing the system. The requirement on that class may vary depending on the scheme.
	scheme : :class:`odelab.scheme.Scheme`
		Scheme to be used to perform the actual simulation.
	path : :string:
		Path to the file where to save the produced data (if ``None``, a tempfile is created).
		"""
		self.system = system
		self.scheme = scheme
		self.init_scheme = init_scheme
		# without a path, events are kept in an in-memory store
		if path is None:
			self.store = SimpleStore()
		else:
			self.store = Store(path)
	# max_iter = max_iter_factor * (time/h)
	max_iter_factor = 100
	def initialize(self, u0=None, name=None):
		"""
		Initialize the solver to the initial condition :math:`u(0) = u0`.
		:param array u0: initial condition; if it is not provided, it is set to the previous initial condition.
		:param scalar time: span of the simulation
		:param string name: name of this simulation
		"""
		self.current_scheme = None
		if u0 is None: # initial condition not provided
			raise self.NotInitialized("You must provide an initial condition.")
		if np.isscalar(u0):
			u0 = [u0] # todo: test if this is necessary
		u0 = np.array(u0)
		# an "event" is the state vector with the time appended as last component
		raw_event0 = np.hstack([u0, 0])
		event0 = self.system.preprocess(raw_event0)
		self.set_name(name=name)
		info = {
			'u0':u0,
		}
		# save system and scheme information in order to recover if unpickling fails
		solver_info = {
			'system_class': repr(type(self.system)),
			'scheme_class': repr(type(self.scheme)),
			'init_scheme_class': repr(type(self.init_scheme)),
			'solver_class': repr(type(self)),
		}
		self.store.initialize(event0, self.name)
		with self.open_store(write=True):
			# store the metadata
			self.store['init_params'] = info
			self.store['solver_info'] = solver_info
			self.store['solver'] = self
			# duration counter:
			self.store['duration'] = 0.
			# append the initial condition:
			self.store.append(event0)
	@contextmanager
	def open_store(self, write=False):
		"""
		Method to open the data store. Any access to the events must make use of this method::
			with solver.open_store() as events:
				...
		"""
		with self.store.open(write) as events:
			yield events
	def __len__(self):
		# number of stored events
		return len(self.store)
	def generate(self, events):
		"""
		Generates the (t,u) values.
		"""
		last_event = events[:, -1]
		event = last_event
		init_stage = self.store.get_nb_stage(events)
		tail_length = self.scheme.tail_length
		for stage in itertools.count(init_stage): # infinite loop
			if stage < tail_length: # not enough past values to run main scheme
				if stage == 1:
					self.set_scheme(self.init_scheme, events)
			elif self.current_scheme is None: # main scheme kicks in
				self.set_scheme(self.scheme, events)
			event = self.step()
			yield event
	class FinalTimeNotReached(Exception):
		"""
		Raised when the final time was not reached within the given ``max_iter`` number of iterations.
		"""
	class Unstable(Exception):
		"""
		Raised when the scheme produces NaN values.
		"""
	class NotInitialized(Exception):
		"""
		Raised when the solver is not properly initialized.
		"""
	class RuntimeError(Exception):
		"""
		Raised to relay an exception occurred while running the solver.
		"""
	class NotRun(Exception):
		"""
		Raised when trying to access the events although the solver is empty.
		"""
	@contextmanager
	def simulating(self):
		# times the simulation and accumulates the elapsed time in 'duration'
		self._start_time = time.time()
		with self.open_store(write=True) as events:
			yield events
		end_time = time.time()
		duration = end_time - self._start_time
		self.store['duration'] += duration
	catch_runtime = True # whether to catch runtime exception (not catching allows to see the traceback)
	t_tol = 1e-12 # tolerance to tell whether the final time is reached
	def run(self, time, max_iter=None):
		"""
		Run the simulation for a given time.
		:param scalar time: the time span for which to run;
		:param max_iter: the maximum number of iterations; if ``None``, an estimate is computed base on the time step and time span
		"""
		if not hasattr(self,'name'):
			raise self.NotInitialized("You must call the `initialize` method before you can run the solver.")
		self._max_iter = max_iter
		if self._max_iter is None:
			# generous estimation of the maximum number of iterations
			self._max_iter = int(time/self.scheme.h * self.max_iter_factor)
		with self.simulating() as events:
			# start from the last time we stopped
			generator = self.generate(events)
			t0 = events[-1, -1]
			tf = t0 + time # final time
			if self.with_progressbar:
				progress_bar.maxval = time
				progress_bar.widgets[0] = self.name
				progress_bar.start()
			for iteration in range(self._max_iter): # todo: use enumerate
				try:
					event = next(generator)
				except Exception as e:
					if self.catch_runtime:
						raise self.RuntimeError('%s raised after %d steps: %s' % (type(e).__name__, iteration, e.args), e, iteration)
					else:
						raise
				else:
					if np.any(np.isnan(event)):
						raise self.Unstable('Unstable after %d steps.' % iteration)
					self.store.append(event)
					t = event[-1]
					if self.with_progressbar:
						progress_bar.update(np.real(t-t0))
					if t > tf - self.t_tol:
						break
			else:
				raise self.FinalTimeNotReached("Reached maximal number of iterations: {0}".format(self._max_iter))
	def set_name(self, name=None):
		"""
		Set or guess a name for this session.
		"""
		if name is not None:
			self.name = name
		else:
			guess = self.guess_name()
			self.name = guess
	def guess_name(self):
		# default session name combines the system and scheme class names
		return "{system}_{scheme}".format(system=type(self.system).__name__, scheme=type(self.scheme).__name__, )
	def get_u(self, index, process=True):
		"""
		Return u[index] after post-processing.
		"""
		with self.open_store() as events:
			event = events[:, index]
		if process:
			event = self.system.postprocess(event)
		return event
	def get_events(self, t0=None, time=None, sampling_rate=1.):
		"""
		Return the events from time t0, during time `time`, sampled.
		"""
		ts = self.get_times()
		if len(ts) == 1:
			raise self.NotRun('The solver has not been run. If you wanted to check the initial condition, use Solver.initial().')
		if t0 is None:
			t0 = ts[0]
		if time is None:
			time = ts[-1] - ts[0]
		indices = np.where((ts >= t0) & (ts < t0 + time))[0]
		size = len(indices)
		initial_index = indices[0]
		final_index = indices[-1]+1
		# sampling_rate < 1 thins the events out with the corresponding stride
		stride = int(np.ceil(1/sampling_rate))
		with self.open_store() as events:
			result = events[:, slice(initial_index, final_index, stride)]
		return result
	def get_times(self):
		# the last row of the event array holds the times
		with self.open_store() as events:
			times = events[-1]
		return times
	def final_time(self):
		with self.open_store() as events:
			final = events[-1, -1]
		return final
	def initial(self, process=True):
		"""
		Convenience method to obtain the initial condition.
		"""
		return self.get_u(0, process)
	def final(self, process=True):
		"""
		Convenience method to obtain the last computed value.
		"""
		return self.get_u(-1, process)
	def plot(self, *args, **kwargs):
		"""
		Plot using the plotter object from :method:`odelab.Solver.plotter`.
		"""
		plotter = self.plotter(*args, **kwargs)
		plotter.plot()
		return plotter
	def plotter(self, components=None, plot_exact=True, error=False, time_component=None, t0=None, time=None, **plot_args):
		"""
		Constructs a plotter object.
		"""
		plotter = Plotter(self)
		plotter.setup(plot_exact, error)
		plotter.components = components
		plotter.time_component = time_component
		plotter.plot_args = plot_args
		plotter.t0 = t0
		plotter.time = time
		return plotter
	def plot_function(self, function, *args, **kwargs):
		"""
		Plot a given function of the state. May be useful to plot constraints or energy.
		This is now a convenience function that calls the method :meth:`plot`.
		:param string function: name of the method to call on the current system object
		:Example:
			the code ``solver.plot_function('energy')`` will call the method ``solver.system.energy`` on the current stored solution points.
		"""
		return self.plot(*args, components=[function], **kwargs)
	def plot2D(self, time_component=0, other_component=1, *args, **kwargs):
		"""
		Plot components vs another one
		"""
		return self.plot(other_component, time_component=time_component, *args, **kwargs)
	def __repr__(self):
		solver = type(self).__name__
		scheme = repr(self.scheme)
		if self.init_scheme is not None:
			init_scheme = '({0})'.format(repr(self.init_scheme))
		else:
			init_scheme = ''
		system = repr(self.system)
		return '<{solver}: {scheme}{init_scheme} {system}>'.format(solver=solver, scheme=scheme, init_scheme=init_scheme, system=system)
	def set_scheme(self, scheme, events):
		# make `scheme` the active scheme and let it initialize on past events
		self.current_scheme = scheme
		self.current_scheme.system = self.system
		self.current_scheme.initialize(events)
	def step(self):
		# delegate one integration step to the currently active scheme
		return self.current_scheme.do_step()
SingleStepSolver = Solver
def load_solver(path, name):
	"""
	Create a solver object from a path to an hdf5 file.
	"""
	import tables
	# NOTE(review): tables.openFile/getNode is the legacy PyTables 2.x API;
	# PyTables 3.x renamed these to open_file/get_node — confirm before upgrading.
	with tables.openFile(path, 'r') as f:
		events = f.getNode('/'+name)
		try:
			# the solver object itself is pickled in the node attributes
			solver = events.attrs['solver']
		except KeyError:
			# fall back to the older (v2) storage layout
			solver = load_solver_v2(path, name)
		if not isinstance(solver, Solver): # pickling has failed
			warnings.warn('Loading failed')
			solver = Solver(scheme=None, system=None, path=path)
		solver.name = name
	# rebind the store to the file on disk regardless of how loading went
	solver.store = Store(path)
	solver.store.name = solver.name
	return solver
def load_solver_v2(path, name):
	"""
	Create a solver object from a path to an hdf5 file.
	"""
	import tables
	# legacy PyTables 2.x API (openFile/getNode); see note in load_solver
	with tables.openFile(path, 'r') as f:
		events = f.getNode('/'+name)
		info = events.attrs['solver_info']
		if isinstance(info, dict):
			# metadata stored as a dict: rebuild the solver from its parts
			system = info['system']
			scheme = info['scheme']
			solver_class = info['solver_class']
			solver = solver_class(system=system, scheme=scheme, path=path)
		else:
			# unknown metadata format: fall back to a bare solver
			solver = Solver(scheme=None, system=None, path=path)
		solver.name = name
	return solver
# try to import progressbar and use it if it is available
try:
	import progressbar as pb
	# widgets[0] is a placeholder that Solver.run replaces with the run name
	widgets = ['', ' ', pb.Timer('%s'), ' ', pb.Percentage(), ' ', pb.Bar('='), ' ', ' ', pb.ETA(), ]
	progress_bar = pb.ProgressBar(widgets=widgets)
	del pb
	with_progressbar = True
except ImportError:
	with_progressbar = False
# expose the flag as a class attribute so methods can test self.with_progressbar
Solver.with_progressbar = with_progressbar
|
|
#Continuous_alternate_GUI_creation
#Description: This module is the core of the synchronization process.
#The main function is con_alt_measure, which is called in the module ConAltHandlers.py
from lib.util.SweepType import SweepType
from lib.util.SourceType import SourceType
from lib.util.SourceMode import SourceMode
from lib.SMUSweep import SMUSweep
from xlwt import Workbook,easyxf
from lib.SMUConstant import SMUConstant
from lib.K4200 import K4200
from lib.VnaChannel import VnaChannel
from lib.util.DataTransformers import z_from_s, y_from_s, cga_from_y, cgs_from_y, cga_from_s, cgs_from_s
from gui.VnaMeasure import chunker, write_vector, write_4vectors
from threading import RLock, Thread
from lib.util.VnaEnums import SParameters
import time
import pprint
import numpy
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from PyQt4 import QtCore, QtGui
import MedicionesContinuoAlterno
from PyQt4.QtGui import QDialog
import csv
"""
Prevent that the data dont overlap itself. Due the synchronization between
K4200 & the Agilent (VNA), we need to be sure that the data is arriving correctly.
"""
#Definition of the locks needed for the synchronization process
vlock = RLock()
klock = RLock()
gdata = {}
params = []
"""
main function con_alt_measure
Inputs: smu_parameters, vna_parameters, delay, connection_keithley, connection_vna, puntosVNA, GUI object
*delay is the time the SMU is at each step
*Points are the number of steps made during the measurement
#conn is a list with the IP and the port
"""
def con_alt_measure(smu_params, vna_params, delay, conn_keithley, conn_vna, puntosVNA, GUI):
    """Run the synchronized DC (K4200 SMU sweep) + RF (VNA) measurement flow.

    Builds the SMU sweep objects, defines the per-button measurement slots and
    opens the measurement dialog. Results are written to disk via write_vector /
    write_4vectors / save_matrix / save_data and plotted with matplotlib.
    """
    #Global variables used for the de-embedding process
    global yopen_path
    global alpha
    print "U R in ConAltMeasure - 'con_alt_measure' "
    #Parameters obtaining fom the inputs
    points = smu_params["steps"]
    # total SMU sweep time: `delay` seconds spent at each of `points` steps
    sweep_time_SMU = delay*points
    params.append(smu_params)
    ch = smu_params["index"] + 1
    ch2 = ch + 1
    if smu_params["mode"] == "voltage":
        source_mode = SourceMode.VOLTAGE
        source_type = SourceType.VOLTAGE
    if smu_params["mode"] == "current":
        source_mode = SourceMode.CURRENT
        source_type = SourceType.CURRENT
    start = smu_params["start"]
    stop = smu_params["stop"]
    step = smu_params["step"]
    compliance = smu_params["compliance"]
    sweep_type = SweepType.LINEAR # Always linear!
    # Voltage for the second SMU is always 0 (grounded)
    Output = 0
    #Creation of the SMUs objects
    smu = SMUSweep(ch, source_mode, source_type, start, stop, step,
        compliance, sweep_type, 'V%s' % ch, "I%s"%ch)
    smu2 = SMUConstant(ch2, source_mode, source_type, Output, compliance, 'V%s' % ch2, "I%s" %ch2)
    yx = numpy.linspace(start,stop,points) #points include the limits points
    #Saving voltages vector
    smu_Vlist = []
    for i in range(1,points+1):
        smu_Vlist.append(float(yx[i-1]))
    # the first and last sweep points are dropped everywhere below ([1:-1])
    write_vector(smu_Vlist[1:-1], vna_params["file"] + "_V")
    #Define function to measure the SCS (unnecessary)
    def measure_keithley(keithley): #Keithley is a K4200 instance object
        print "U R in ConAltMeasure - 'measure_keithley' " #flag 4 debug
        klock.acquire()
        keithley.measure()
        klock.release()
    params.append(vna_params)
    #Saving frecuency vector
    VNA_list = []
    final = vna_params['freq_stop']
    ini = vna_params['freq_start']
    yx_VNA = numpy.linspace(ini,final,puntosVNA)
    for i in range(1,int(puntosVNA)+1):
        VNA_list.append(float(yx_VNA[i-1]))
    write_vector(VNA_list, vna_params["file"] + "_F")
    #Define function to measure the VNA
    def measure_vna(vna):
        print "U R in ConAltMeasure - 'measure_vna' "
        vlock.acquire()
        vna.beep()
        vna.trigger()
        vna.beep()
        vlock.release()
    #Initialization of the y_open_pad matrix
    vector_zeros_open=[0]*3*points
    # NOTE(review): all four rows alias the same list object; they are only
    # ever replaced wholesale (Slot1), never mutated in place — confirm before
    # mutating individual rows.
    y_pad_flag=[vector_zeros_open,vector_zeros_open,vector_zeros_open,vector_zeros_open]
    yopen_path=[]
    for idx_freq in xrange(len(VNA_list)):
        yopen_path.append(y_pad_flag)
    alpha=1
    #Definition of the setup function, needed to communicate with the VNA, get the data and add it to the corresponding matrix.
    def setup(execution_flag, idx_freq):
        global yopen_device
        global yopen_path
        global yshort
        global graf_cga
        global graf_cgs
        global cga
        global cgs
        global matriz_z11
        global vector_z11
        print "U R in con_alt_measure - Parametros previos del keithley"
        #Configuration of the SCS
        device = K4200(conn_keithley[0], conn_keithley[1])
        device.attach(smu)
        device.attach(smu2)
        device.configure()
        device.executor.execute_command("SS DT {time}".format(time=delay)) #Force a delay time of "delay" seconds per step
        device.executor.execute_command("SS HT {timeH}".format(timeH=0.0)) #Force a hold time of 0.0 seconds
        print "U R in con_alt_measure - Dentro del LOOP de frecuencias"
        #Configuration of the VNA
        vna = VnaChannel(conn_vna[0], conn_vna[1], 1)
        vna.set_one_channel()
        vna.set_bus_trigger()
        vna.channel = 1
        vna.set_continuous(False)
        vna.set_immediate()
        vna.activate_channel()
        vna.set_traces(4) #Activate 4 traces
        sparamList = [None, SParameters.S11, SParameters.S12, SParameters.S21, SParameters.S22]
        for j in range(1,5):
            vna.set_sparam(j, sparamList[j])
            vna.activate_trace(j)
        vna.set_format(vna_params["format"])
        N=3 #measurement resolution (VNA samples per SMU step)
        vna.set_points(N*points) #Keithley points
        vna.set_sweep_time(sweep_time_SMU)
        vna.set_sweep_delay(0.000)
        # single-frequency sweep: both endpoints set to the same frequency
        if vna_params["type"] == "center_span":
            vna.set_center_span(VNA_list[idx_freq], VNA_list[idx_freq])
        elif vna_params["type"] == "start_stop":
            vna.set_start_stop(VNA_list[idx_freq], VNA_list[idx_freq])
        #First sincronization loop assuming different configuration times
        while True:
            vlock.acquire()
            vna_ready = vna.is_ready()
            vlock.release()
            if vna_ready:
                break
            time.sleep(1)
        time.sleep(2)
        #Definition of the threads
        threads = []
        measure_vna_t = Thread(target=measure_vna, args=(vna,))
        measure_keithley_t = Thread(target=measure_keithley, args=(device,))
        #Beginning of the measurement
        measure_vna_t.start()
        measure_keithley_t.start()
        threads.append(measure_vna_t)
        threads.append(measure_keithley_t)
        for t in threads:
            t.join()
        # wait again until the VNA reports the sweep has completed
        while True:
            vlock.acquire()
            vna_ready = vna.is_ready()
            vlock.release()
            if vna_ready:
                break
            time.sleep(1)
        #Beginning of the data obtaining. The data from the SCS is not being recovered
        threads = []
        check_keithley_t = Thread(target=check_keithley, args=(device,smu_params,sweep_time_SMU,))
        check_keithley_t.start()
        threads.append(check_keithley_t)
        for t in threads:
            t.join()
        threads = []
        #The data obteined from the VNA is saved into a different matrix depending on the button pushed
        # flags 0/1/2: calibration standards; 3: DUT with de-embedding; 4: DUT raw
        if execution_flag in [0,1,2]:
            check_vna_t = Thread(target=check_vna_short_open, args=(vna,vna_params,idx_freq,points, N, execution_flag,))
        elif execution_flag==3:
            yopen_device_single=yopen_device[idx_freq]
            yopen_path_single=yopen_path[idx_freq]
            yshort_single=yshort[idx_freq]
            check_vna_t = Thread(target=check_vna, args=(vna,vna_params,idx_freq,points, N, yopen_device_single, yopen_path_single, yshort_single,))
        elif execution_flag==4:
            check_vna_t = Thread(target=check_vna_no_deembedding, args=(vna, vna_params, idx_freq, points, N,))
        check_vna_t.start()
        threads.append(check_vna_t)
        for t in threads:
            t.join()
        #Addition of a curve to the graph and of a impedance vector to the impedance matrix
        if execution_flag in [3,4]:
            graf_cga.append(cga)
            graf_cgs.append(cgs)
            matriz_z11.append(vector_z11)
        time.sleep(2)
    #Definition of the slots for each option related to each button
    def Slot0():
        # measure the open standard with the device in place
        global yopen_device
        QtGui.QMessageBox.information(GUI.centralwidget,"Open", "Conectar open - device")
        yopen_device=[]
        execution_flag=0
        for idx_freq in xrange(len(VNA_list)):
            setup(execution_flag, idx_freq)
    def Slot1():
        # measure the open standard on the pad only; halves the alpha weight
        global yopen_path
        global alpha
        QtGui.QMessageBox.information(GUI.centralwidget,"Open", "Conectar open - pad")
        yopen_path=[]
        alpha=0.5
        execution_flag=1
        for idx_freq in xrange(len(VNA_list)):
            setup(execution_flag, idx_freq)
    def Slot2():
        # measure the short standard
        global yshort
        QtGui.QMessageBox.information(GUI.centralwidget,"Short", "Conectar short")
        yshort=[]
        execution_flag=2
        for idx_freq in xrange(len(VNA_list)):
            setup(execution_flag, idx_freq)
    def Slot3():
        # measure the DUT and apply open/short de-embedding, then save and plot
        global matriz_z11
        global vector_z11
        global graf_cga
        global graf_cgs
        global cga
        global cgs
        execution_flag=3
        QtGui.QMessageBox.information(GUI.centralwidget,"DUT", "Conectar DUT")
        matriz_z11=[]
        graf_cga=[]
        graf_cgs=[]
        for idx_freq in xrange(len(VNA_list)):
            cga=[]
            cgs=[]
            vector_z11=[]
            setup(execution_flag, idx_freq)
        graf_v=smu_Vlist[1:-1]
        save_matrix(VNA_list, graf_v, matriz_z11, vna_params["file"])
        save_data(VNA_list, graf_v, graf_cga, graf_cgs, vna_params["file"])
        print graf_v
        fig=plt.figure()
        gs = gridspec.GridSpec(1, 2)
        ax1=fig.add_subplot(gs[0])
        for i in graf_cga:
            print i
            ax1.plot(graf_v, i)
        ax2=fig.add_subplot(gs[1])
        for i in graf_cgs:
            ax2.plot(graf_v, i)
            print i
        plt.show()
    def Slot4():
        # measure the DUT without de-embedding; results saved with 'nd' suffix
        global matriz_z11
        global vector_z11
        global graf_cga
        global graf_cgs
        global cga
        global cgs
        execution_flag=4
        QtGui.QMessageBox.information(GUI.centralwidget,"DUT", "Conectar DUT")
        graf_cga=[]
        graf_cgs=[]
        matriz_z11=[]
        for idx_freq in xrange(len(VNA_list)):
            cga=[]
            cgs=[]
            vector_z11=[]
            setup(execution_flag, idx_freq)
        graf_v=smu_Vlist[1:-1]
        save_matrix(VNA_list, graf_v, matriz_z11, vna_params["file"]+'nd')
        save_data(VNA_list, graf_v, graf_cga, graf_cgs, vna_params["file"]+'nd')
        print graf_v
        fig=plt.figure()
        gs = gridspec.GridSpec(1, 2)
        ax1=fig.add_subplot(gs[0])
        for i in graf_cga:
            print i
            ax1.plot(graf_v, i)
        ax2=fig.add_subplot(gs[1])
        for i in graf_cgs:
            ax2.plot(graf_v, i)
            print i
        plt.show()
    #Creation of the GUI and connection between the slots and the buttons.
    GUI.Medui_ui=MedicionesContinuoAlterno.Ui_Medicionescontinuoalterno()
    dialog = QDialog()
    dialog.ui = GUI.Medui_ui
    dialog.ui.setupUi(dialog, Slot0, Slot1, Slot2, Slot3, Slot4)
    dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    dialog.exec_()
#Definition of the functions used to check if the VNA is done with its actions
def check_vna_short_open(vna, vna_params, idx_freq, points, N, shortoopen):
print "U R in ConAltMeasure - 'check_vna' " #flag 4 debug
while True:
vlock.acquire()
is_ready = vna.is_ready() #ask *OPC?
vlock.release()
if is_ready:
break
time.sleep(1)
print "Waiting for VNA"
print "VNA is ready"
vna.beep()
retrieve_short_open(vna, vna_params, idx_freq, points, N, shortoopen)
reset_config(vna)
vna.executor.close()
def check_vna(vna, vna_params, idx_freq, points, N, yopen_device, yopen_path, yshort):
print "U R in ConAltMeasure - 'check_vna' " #flag 4 debug
while True:
vlock.acquire()
is_ready = vna.is_ready() #ask *OPC?
vlock.release()
if is_ready:
break
time.sleep(1)
print "Waiting for VNA"
print "VNA is ready"
vna.beep()
retrieve_vna_data(vna, vna_params, idx_freq, points, N, yopen_device, yopen_path, yshort)
reset_config(vna)
vna.executor.close()
def check_vna_no_deembedding(vna, vna_params, idx_freq, points, N):
print "U R in ConAltMeasure - 'check_vna' " #flag 4 debug
while True:
vlock.acquire()
is_ready = vna.is_ready() #ask *OPC?
vlock.release()
if is_ready:
break
time.sleep(1)
print "Waiting for VNA"
print "VNA is ready"
vna.beep()
retrieve_vna_data_no_deembedding(vna, vna_params, idx_freq, points, N)
reset_config(vna)
vna.executor.close()
#Definition of the functions used to retrieve data from the VNA and save it to the corresponding matrix
def retrieve_short_open(vna, vna_params, idx_freq, points, N, shortoopen):
    """Read the 4 S-parameter traces, convert to Y and store as a standard.

    shortoopen selects the destination: 0 = yopen_device, 1 = yopen_path,
    2 = yshort. Also writes the downsampled data to disk.
    """
    global yopen_device
    global yopen_path
    global yshort
    sdata = []
    #freq_data = []
    template = ":CALC{ch}:TRAC{trace}:DATA:FDAT?" #template = "CALC:SEL:DATA:FDAT"
    channel = 1 # [S11,S12,S21,S22]
    for trac in range(1,5):
        # traces come back as a flat comma-separated list of (re, im) pairs
        data = vna.executor.ask(template.format(ch=str(channel),trace=str(trac)))
        data = data.split(',')
        data = [complex(float(pair[0]), float(pair[1])) for pair in chunker(data, 2)]
        sdata.append(data)
        while True:
            vlock.acquire()
            is_ready = vna.is_ready() #ask *OPC?
            vlock.release()
            if is_ready:
                break
            time.sleep(1)
    print sdata
    if len(sdata) == 4:
        print "----Sdata----"
        print sdata
    while True:
        vlock.acquire()
        is_ready = vna.is_ready() #ask *OPC?
        vlock.release()
        if is_ready:
            break
        time.sleep(1)
    ydata = y_from_s(sdata)
    print ydata
    yguardar=[]
    for data in ydata:
        # drop the first and last SMU step, then keep the middle sample of
        # each N-sample group; N/2 is Python 2 integer division
        b7=data[N:N*(points-1)]
        c7=[]
        for i in range(0,points-2):
            c7.append(b7[N/2+N*i])
        yguardar.append(c7)
    if shortoopen == 0:
        yopen_device.append(ydata)
        write_4vectors(yguardar, vna_params["file"] + "_open_device" + str(idx_freq))
    if shortoopen == 1:
        yopen_path.append(ydata)
        write_4vectors(yguardar, vna_params["file"] + "_open_pad" + str(idx_freq))
    if shortoopen == 2:
        yshort.append(ydata)
        write_4vectors(yguardar, vna_params["file"] + "_short" + str(idx_freq))
def retrieve_vna_data(vna, vna_params, idx_freq, points, N, yopen_device, yopen_path, yshort):
    """Read one de-embedded DUT sweep from the VNA.

    Reads S11/S12/S21/S22 and the frequency axis, converts to Y and Z
    parameters, de-embeds the transistor Y-matrix using the open/short
    calibration structures, extracts Cga/Cgs, decimates every result and
    writes them to files named after vna_params["file"].

    Results are also published through the module globals alpha, cga, cgs
    and vector_z11.
    """
    global alpha
    global cga
    global cgs
    global vector_z11
    print "U R in ConAltMeasure - 'retrieve_vna_data' " #flag 4 debug
    sdata = []
    freq_data = []
    template = ":CALC{ch}:TRAC{trace}:DATA:FDAT?" #template = "CALC:SEL:DATA:FDAT"
    channel = 1 #1 # correct trace order is [S11,S12,S21,S22]
    for trac in range(1,5):
        data = vna.executor.ask(template.format(ch=str(channel),trace=str(trac)))
        data = data.split(',')
        # NOTE(review): assumes `chunker` yields (real, imag) 2-item slices.
        data = [complex(float(pair[0]), float(pair[1])) for pair in chunker(data, 2)]
        sdata.append(data)
    # Wait for the instrument to finish before using the data.
    while True:
        vlock.acquire()
        is_ready = vna.is_ready() #ask *OPC?
        vlock.release()
        if is_ready:
            break
        time.sleep(1)
    print sdata
    if len(sdata) == 4:
        print "----Sdata----"
        print sdata
    # Read the stimulus frequencies for this sweep.
    template_freq = ":SENS{ch}:FREQ:DATA?"
    while True:
        vlock.acquire()
        is_ready = vna.is_ready() #ask *OPC?
        vlock.release()
        if is_ready:
            break
        time.sleep(1)
    freq_data = vna.executor.ask(template_freq.format(ch=1))
    freq_data = freq_data.split(',')
    freq_data = [float(fdatum) for fdatum in freq_data]
    print freq_data
    ydata = y_from_s(sdata)
    zdata = z_from_s(sdata)
    print "----ydata----"
    print ydata
    print "----zdata----"
    print zdata
    # Point-by-point open/short de-embedding of the transistor Y-matrix.
    # NOTE(review): `alpha` weights the device-open vs pad-open correction;
    # confirm its intended value where it is set.
    y_tran_data=[[],[],[],[]]
    for indice in range(len(sdata[0])):
        y_open_device=numpy.matrix([[yopen_device[0][indice], yopen_device[1][indice]],[yopen_device[2][indice], yopen_device[3][indice]]])
        y_open_path=numpy.matrix([[yopen_path[0][indice], yopen_path[1][indice]],[yopen_path[2][indice], yopen_path[3][indice]]])
        y_short=numpy.matrix([[yshort[0][indice], yshort[1][indice]],[yshort[2][indice], yshort[3][indice]]])
        y_dut=numpy.matrix([[ydata[0][indice], ydata[1][indice]],[ydata[2][indice], ydata[3][indice]]])
        y_tran=((((y_dut-(alpha*(y_open_device-y_open_path)))**-1)-((y_short-(alpha*(y_open_device-y_open_path)))**-1))**-1)-((1-alpha)*(y_open_device-y_open_path))
        y_tran_data[0].append(y_tran.item(0,0))
        y_tran_data[1].append(y_tran.item(0,1))
        y_tran_data[2].append(y_tran.item(1,0))
        y_tran_data[3].append(y_tran.item(1,1))
    print "----freqdata----"
    print freq_data
    print "----ytrandata----"
    print y_tran_data
    cga_data = cga_from_y(freq_data, y_tran_data)
    cgs_data = cgs_from_y(freq_data, y_tran_data)
    # Decimate: drop the first and last N-sample segments and keep the centre
    # sample (offset N/2) of each remaining segment.  The same pattern is
    # applied to every result vector below.
    b4=cga_data[N:N*(points-1)]
    c4=[]
    for i in range(0,points-2):
        c4.append(b4[N/2+N*i])
    cga_data=c4
    b5=cgs_data[N:N*(points-1)]
    c5=[]
    for i in range(0,points-2):
        c5.append(b5[N/2+N*i])
    cgs_data=c5
    print "----CGA----data----"
    print cga_data
    print "----CGS----data----"
    print cgs_data
    # Publish decimated capacitances for the rest of the program.
    cga=cga_data
    cgs=cgs_data
    sdataflag=[]
    for data in sdata:
        b1=data[N:N*(points-1)]
        c1=[]
        for i in range(0,points-2):
            c1.append(b1[N/2+N*i])
        sdataflag.append(c1)
    sdata=sdataflag
    zdataflag=[]
    for data in zdata:
        b2=data[N:N*(points-1)]
        c2=[]
        for i in range(0,points-2):
            c2.append(b2[N/2+N*i])
        zdataflag.append(c2)
    zdata=zdataflag
    # First Z row (Z11) is kept globally for the impedance matrix export.
    vector_z11=zdata[0]
    ydataflag=[]
    for data in ydata:
        b3=data[N:N*(points-1)]
        c3=[]
        for i in range(0,points-2):
            c3.append(b3[N/2+N*i])
        ydataflag.append(c3)
    ydata=ydataflag
    ytrandataflag=[]
    for data in y_tran_data:
        b6=data[N:N*(points-1)]
        c6=[]
        for i in range(0,points-2):
            c6.append(b6[N/2+N*i])
        ytrandataflag.append(c6)
    y_tran_data=ytrandataflag
    print "----ytrandatacorto----"
    print y_tran_data
    # Save this sweep's results to disk:
    write_vector(freq_data, vna_params["file"] + "_freqs" + str(idx_freq))
    write_4vectors(sdata, vna_params["file"] + "_s" + str(idx_freq))
    write_4vectors(zdata, vna_params["file"] + "_z" + str(idx_freq))
    write_4vectors(ydata, vna_params["file"] + "_y" + str(idx_freq))
    write_4vectors(y_tran_data, vna_params["file"] + "_y_transistor" + str(idx_freq))
    write_vector(cga_data, vna_params["file"] + "_CGA" + str(idx_freq))
    write_vector(cgs_data, vna_params["file"] + "_CGS" + str(idx_freq))
def retrieve_vna_data_no_deembedding(vna, vna_params, idx_freq, points, N):
    """Read one raw DUT sweep from the VNA without de-embedding.

    Same flow as retrieve_vna_data() but Cga/Cgs are computed directly from
    the S-parameters and no open/short correction is applied.  Output files
    carry the "_nd" (no de-embedding) suffix.
    """
    global cga
    global cgs
    global vector_z11
    print "U R in ConAltMeasure - 'retrieve_vna_data' " #flag 4 debug
    sdata = []
    freq_data = []
    template = ":CALC{ch}:TRAC{trace}:DATA:FDAT?" #template = "CALC:SEL:DATA:FDAT"
    channel = 1 # [S11,S12,S21,S22]
    for trac in range(1,5):
        data = vna.executor.ask(template.format(ch=str(channel),trace=str(trac)))
        data = data.split(',')
        # NOTE(review): assumes `chunker` yields (real, imag) 2-item slices.
        data = [complex(float(pair[0]), float(pair[1])) for pair in chunker(data, 2)]
        sdata.append(data)
    # Wait for the instrument to finish before using the data.
    while True:
        vlock.acquire()
        is_ready = vna.is_ready() #ask *OPC?
        vlock.release()
        if is_ready:
            break
        time.sleep(1)
    if len(sdata) == 4:
        print "----Sdata----"
        print sdata
    # Read the stimulus frequencies for this sweep.
    template_freq = ":SENS{ch}:FREQ:DATA?"
    while True:
        vlock.acquire()
        is_ready = vna.is_ready() #ask *OPC?
        vlock.release()
        if is_ready:
            break
        time.sleep(1)
    freq_data = vna.executor.ask(template_freq.format(ch=1))
    freq_data = freq_data.split(',')
    freq_data = [float(fdatum) for fdatum in freq_data]
    print freq_data
    ydata = y_from_s(sdata)
    zdata = z_from_s(sdata)
    print "----ydata----"
    print ydata
    print "----zdata----"
    print zdata
    # Capacitances straight from S-parameters (no de-embedding).
    cga_data = cga_from_s(freq_data, sdata)
    cgs_data = cgs_from_s(freq_data, sdata)
    print "----CGA----data----"
    print cga_data
    print "----CGS----data----"
    print cgs_data
    # Decimate: drop the first and last N-sample segments and keep the centre
    # sample (offset N/2) of each remaining segment (same pattern below).
    b4=cga_data[N:N*(points-1)]
    c4=[]
    for i in range(0,points-2):
        c4.append(b4[N/2+N*i])
    cga_data=c4
    b5=cgs_data[N:N*(points-1)]
    c5=[]
    for i in range(0,points-2):
        c5.append(b5[N/2+N*i])
    cgs_data=c5
    print "----CGA----data----"
    print cga_data
    print "----CGS----data----"
    print cgs_data
    # Publish decimated capacitances for the rest of the program.
    cga=cga_data
    cgs=cgs_data
    sdataflag=[]
    for data in sdata:
        b1=data[N:N*(points-1)]
        c1=[]
        for i in range(0,points-2):
            c1.append(b1[N/2+N*i])
        sdataflag.append(c1)
    sdata=sdataflag
    zdataflag=[]
    for data in zdata:
        b2=data[N:N*(points-1)]
        c2=[]
        for i in range(0,points-2):
            c2.append(b2[N/2+N*i])
        zdataflag.append(c2)
    zdata=zdataflag
    # First Z row (Z11) is kept globally for the impedance matrix export.
    vector_z11=zdata[0]
    ydataflag=[]
    for data in ydata:
        b3=data[N:N*(points-1)]
        c3=[]
        for i in range(0,points-2):
            c3.append(b3[N/2+N*i])
        ydataflag.append(c3)
    ydata=ydataflag
    # Save this sweep's results to disk:
    write_vector(freq_data, vna_params["file"] + "_freqs_nd" + str(idx_freq))
    write_4vectors(sdata, vna_params["file"] + "_s_nd" + str(idx_freq))
    write_4vectors(zdata, vna_params["file"] + "_z_nd" + str(idx_freq))
    write_4vectors(ydata, vna_params["file"] + "_y_nd" + str(idx_freq))
    write_vector(cga_data, vna_params["file"] + "_CGA_nd" + str(idx_freq))
    write_vector(cgs_data, vna_params["file"] + "_CGS_nd" + str(idx_freq))
#Definition of the functions used to check if the SCS is done with its actions
def check_keithley(device, smu_params, sweep_time_SMU):
    """Poll the K4200 SCS until it reports ready, close its connection and
    then pause for the duration of the SMU sweep."""
    print("U R in ConAltMeasure - 'check_keithley' ")  # flag 4 debug
    ready = False
    while not ready:
        # Serialize instrument access with the other threads.
        klock.acquire()
        ready = device.is_ready()
        klock.release()
        if not ready:
            time.sleep(1)
            print("Waiting for K4200")
    print("K4200 is ready")
    #retrieve_keithley_data(device, smu_params)
    device.executor.close()
    time.sleep(sweep_time_SMU)
#Definition of the functions used to retrieve data from the SCS and save it to the corresponding matrix (not used)
def retrieve_keithley_data(device, smu_params):
    """Ask the K4200 for the configured channel's data and stash the
    comma-split values in gdata["pol"]."""
    print("U R in ConAltMeasure - 'retrieve_keithley_data' ")  # flag 4 debug
    channel = smu_params["index"] + 1
    query = "DO 'CH{0}T'".format(channel)
    raw = device.executor.ask(query)
    print("Keithley Data:")
    pprint.pprint(raw)
    values = raw.split(",")
    gdata["pol"] = values
    print("----Keithley----data----")
    print(values)
#Definition of the function used to save an impedance matrix
def save_matrix(Frequencies, Voltages, Matrix, filename):
    """Dump a complex impedance matrix to '<filename>Matrix_z11.xls'.

    Row 0 holds the frequencies, column 0 the voltages; each cell stores the
    complex value rendered as "<real>+<imag>j" text."""
    workbook = Workbook()
    sheet = workbook.add_sheet('Z11')
    # Voltage labels down column 0, frequency labels across row 0.
    for row, voltage in enumerate(Voltages):
        sheet.write(row + 1, 0, voltage)
    for col, frequency in enumerate(Frequencies):
        sheet.write(0, col + 1, frequency)
    # One column per frequency vector, one row per voltage point.
    for col, column_values in enumerate(Matrix):
        for row, value in enumerate(column_values):
            cell = str(value.real) + "+" + str(value.imag) + "j"
            sheet.write(row + 1, col + 1, cell)
    workbook.save(filename + 'Matrix_z11.xls')
def save_data(Frequencies, Voltages, Cgas, Cgcs, filename):
    """Write the capacitance sweeps to '<filename>_Results.xls'.

    Three sheets are produced: 'Cga', 'Cgc' and 'Cgb' (Cgb = Cga - Cgc,
    element-wise).  Column 0 lists the voltages; each frequency contributes
    a (Frequency, Capacitance) column pair.
    """
    def list_difference(A,B):
        # Element-wise A - B; assumes len(A) == len(B).
        C=[]
        for index,vala in enumerate(A):
            c=A[index]-B[index]
            C.append(c)
        return C
    wb = Workbook()
    Pages=['Cga','Cgc','Cgb']
    Labels=['Frequency','Capacitance']
    Units=['Hz','F']
    sheets=[]
    for indexp,page in enumerate(Pages):
        sheet = wb.add_sheet(page)
        sheets.append(sheet)
        # Header block: bold 'Voltage' label plus its unit, then the
        # voltage values down column 0.
        sheet.write(0,0,'Voltage',easyxf('font: name Times New Roman, color-index black, bold on'))
        sheet.write(1,0,'V',easyxf('font: name Times New Roman, color-index black, bold off'))
        for indexv,voltage in enumerate(Voltages):
            sheet.write(indexv+2,0,voltage)
        flag0=0
        for indexf,frequency in enumerate(Frequencies):
            # Two header columns (label + unit) per frequency.
            for indexl,label in enumerate(Labels):
                sheet.write(0,2*flag0+indexl+1,label,easyxf('font: name Times New Roman, color-index black, bold on'))
                sheet.write(1,2*flag0+indexl+1,Units[indexl],easyxf('font: name Times New Roman, color-index black, bold off'))
            flag0=flag0+1
            # NOTE(review): this writes the frequency into row 2, the same
            # row as the first capacitance value below -- confirm the
            # intended layout.
            sheet.write(2,2*indexf+1,frequency)
            if indexp == 0:
                for indexval,value in enumerate(Cgas[indexf]):
                    sheet.write(indexval+2,2*(indexf+1),value)
            if indexp == 1:
                for indexval,value in enumerate(Cgcs[indexf]):
                    sheet.write(indexval+2,2*(indexf+1),value)
            if indexp == 2:
                Cgb=list_difference(Cgas[indexf],Cgcs[indexf])
                for indexval,value in enumerate(Cgb):
                    sheet.write(indexval+2,2*(indexf+1),value)
    wb.save(filename+'_Results.xls')
#Definition of the function used to reset the configuration of the VNA
def reset_config(vna):
    """Return the VNA to internal triggering and single-channel display,
    then put every channel back in immediate-trigger mode."""
    print("U R in ConAltMeasure - 'reset_config' ")  # flag 4 debug
    vna.set_internal_trigger()
    vna.set_one_channel()
    for channel in (1, 2, 3, 4):
        vna.channel = channel
        vna.set_immediate()
|
|
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value.

    Fix: the original used the Python 2-only long literal ``0xffffffffL``,
    which is a SyntaxError on Python 3.  The plain literal is value-identical
    on Python 2 (ints promote to long automatically) and portable.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word, e.g. 0x12345678 -> 0x78563412."""
    swapped = (((x) << 24) | (((x) << 8) & 0x00ff0000) |
               (((x) >> 8) & 0x0000ff00) | ((x) >> 24))
    # Inline the uint32() helper: mask the result back to 32 bits.
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf* and return the new buffer.

    Fix/simplification: the original unpacked each word with struct ('@I'),
    bit-shuffled it with bytereverse() and re-packed it -- which is exactly
    reversing the 4-byte slice, regardless of host endianness (pack and
    unpack use the same native order, so they cancel out).  Reversing the
    slice directly avoids the round-trip, and joining with ``b''`` works on
    both Python 2 (where b'' is str) and Python 3 (bytes), whereas the
    original ``''.join`` breaks on Python 3.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i + 4][::-1])
    return b''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf* (bytes within each
    word keep their order).

    Fix: join with ``b''`` so the function works on both Python 2 (where
    b'' is str, so behavior is unchanged) and Python 3 (bytes slices cannot
    be joined by a str separator).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i + 4])
    out_words.reverse()
    return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
    """Return SHA256(SHA256(blk_hdr)) -- the double-SHA256 block-header hash."""
    first_pass = hashlib.sha256(blk_hdr).digest()
    second_pass = hashlib.sha256(first_pass).digest()
    return second_pass
def calc_hash_str(blk_hdr):
    """Hex string of the double-SHA256 block hash, reordered into the
    big-endian display form used by the hash list."""
    raw = calc_hdr_hash(blk_hdr)
    raw = bufreverse(raw)
    raw = wordreverse(raw)
    # str.encode('hex') is Python 2 only (binascii.hexlify elsewhere).
    return raw.encode('hex')
def get_blk_dt(blk_hdr):
    """Extract the block timestamp (little-endian uint32 at offset 68) and
    return (first-of-month datetime, raw unix time)."""
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(nTime)
    month_start = datetime.datetime(stamp.year, stamp.month, 1)
    return (month_start, nTime)
def get_block_hashes(settings):
    """Read the ordered block-hash list (one hash per line) from
    settings['hashlist'] and return it.

    Fix: the original opened the file and never closed it; a ``with``
    block guarantees the handle is released even on error.
    """
    blkindex = []
    with open(settings['hashlist'], "r") as f:
        for line in f:
            blkindex.append(line.rstrip())
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
def mkblockmap(blkindex):
    """Invert the height-ordered hash list into a hash -> height mapping."""
    return {block_hash: height for height, block_hash in enumerate(blkindex)}
# Block header and extent on disk.
#   fn:     index of the input blk*.dat file the block lives in
#   offset: byte position just past the 80-byte header (start of the payload,
#           as recorded by inF.tell() after reading the header)
#   inhdr:  the 8-byte magic+length preamble
#   blkhdr: the raw 80-byte block header
#   size:   length of the block payload that follows the header
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Reads raw blk*.dat files and rewrites the blocks in height order.

    Blocks that arrive out of order are remembered as BlockExtents (and
    optionally cached in memory up to out_of_order_cache_sz bytes) until
    their height is the next one to write.

    Bug fixes vs the original:
      * writeBlock referenced ``outFname``, ``outF``, ``highTS``,
        ``setFileTime``, ``lastDate`` and ``hash_str`` without the ``self.``
        prefix -- NameErrors as soon as file rotation or timestamp splitting
        triggered.  ``self.outFname`` was also never assigned.
      * run() looked the hash up in the global ``blkmap`` instead of
        ``self.blkmap``, working only by accident when run as a script.
    """
    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex      # list: height -> hash string
        self.blkmap = blkmap          # dict: hash string -> height
        self.inFn = 0                 # index of the current input file
        self.inF = None               # open input file handle
        self.outFn = 0                # index of the current output file
        self.outsz = 0                # bytes written to the current output file
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0
        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        self.hash_str = None          # hash of the block currently being read
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0  # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block to the output, rotating the output file when it
        would exceed max_out_sz or (optionally) when a new month starts."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            # Current output file would overflow; close it and rotate.
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            # Optionally start a new output file at each new month.
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + str(self.hash_str))
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex),
                   100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input file number *fn* inside settings['input']."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else:  # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Scan all input files and emit every known block in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            if (not inhdr or (inhdr[0] == "\0")):
                # End of this input file; move on to the next one.
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + inMagic.encode('hex'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80  # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            hash_str = self.hash_str
            # BUG FIX: look the hash up in self.blkmap, not the global blkmap.
            if not hash_str in self.blkmap:
                print("Skipping unknown block " + hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)

                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()
            else:  # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else:  # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    # Parse the simple "key=value" config file, ignoring comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in mainnet defaults for anything the config omitted.
    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'genesis' not in settings:
        settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000L * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000

    # Normalize config values read as strings.  NOTE: `long`, the `L`
    # literal above and str.decode('hex') are Python 2 only.
    settings['max_out_sz'] = long(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = settings['netmagic'].decode('hex')
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # The genesis block must be present, otherwise heights are meaningless.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
|
|
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ACE object handling according to WebDAV ACP specification.
"""
from pyomni.webdav.acp.Principal import Principal
from pyomni.webdav.acp.GrantDeny import GrantDeny
from pyomni.webdav import Constants
from pyomni.webdav.Connection import WebdavError
__version__ = "$LastChangedRevision$"
class ACE(object):
    """
    This class provides functionality for handling ACEs.

    Bug fix vs the original: __repr__ applied the ``%`` operator to a format
    string with no placeholder ('inverted principal, ' % self.invert), which
    raised TypeError whenever ``invert`` was set.  The side-effect-only
    ``map(lambda ...)`` calls in addGrantDenies/delGrantDenies were also
    replaced with plain loops (identical behavior on Python 2, and correct
    rather than lazily skipped on Python 3).

    @ivar principal: A principal (user or group)
    @type principal: L{Principal} object
    @ivar invert: Flag indicating whether ACE should invert the principal.
    @type invert: C{bool}
    @ivar grantDenies: Grant or deny clauses for privileges
    @type grantDenies: C{list} of L{GrantDeny} objects
    @ivar protected: Flag indicating whether ACE is protected.
    @type protected: C{bool}
    @ivar inherited: URL indicating the source from where the ACE is inherited.
    @type inherited: C{string}
    """
    # restrict instance variables
    __slots__ = ('principal', 'invert', 'grantDenies', 'protected', 'inherited')

    def __init__(self, domroot=None, principal=None, grantDenies=None):
        """
        Constructor should be called with either no parameters (create blank ACE),
        one parameter (a DOM tree or principal), or two parameters (principal and
        sequence of GrantDenies).

        @param domroot: A DOM tree (default: None).
        @type domroot: L{webdav.WebdavResponse.Element} object
        @param principal: A principal (user or group), (default: None).
        @type principal: L{Principal} object
        @param grantDenies: Grant and deny clauses for privileges (default: None).
        @type grantDenies: sequence of L{GrantDeny} objects

        @raise WebdavError: When non-valid parameters are passed a L{WebdavError} is raised.
        """
        self.principal = Principal()
        self.protected = None
        self.inherited = None
        self.invert = None
        self.grantDenies = []

        if domroot:
            self.principal = Principal(domroot=domroot.find(Constants.TAG_PRINCIPAL, Constants.NS_DAV))
            self.inherited = domroot.find(Constants.TAG_INHERITED, Constants.NS_DAV)
            if self.inherited:
                self.inherited = self.inherited.children[0].textof()
            if domroot.find(Constants.TAG_PROTECTED, Constants.NS_DAV):
                self.protected = 1
            for child in domroot.children:
                if child.ns == Constants.NS_DAV \
                        and (child.name == Constants.TAG_GRANT or child.name == Constants.TAG_DENY):
                    self.grantDenies.append(GrantDeny(domroot=child))
        elif isinstance(principal, Principal):
            newPrincipal = Principal()
            newPrincipal.copy(principal)
            self.principal = newPrincipal
            if (isinstance(grantDenies, list) or isinstance(grantDenies, tuple)):
                self.addGrantDenies(grantDenies)
        elif domroot is None and grantDenies is None:
            # no param ==> blank ACE
            pass
        else:
            # This shouldn't happen, someone screwed up with the params ...
            raise WebdavError('non-valid parameters handed to ACE constructor')

    def __cmp__(self, other):
        # Python 2 comparison protocol: 0 means equal, non-zero not equal.
        if not isinstance(other, ACE):
            return 1
        if self.principal == other.principal \
                and self.invert == other.invert \
                and self.protected == other.protected \
                and self.inherited == other.inherited:
            equal = 1
            for grantDeny in self.grantDenies:
                inList = 0
                for otherGrantDeny in other.grantDenies:
                    if grantDeny == otherGrantDeny:
                        inList = 1
                if inList == 0:
                    equal = 0
            return not equal
        else:
            return 1

    def __repr__(self):
        # Local renamed from `repr` to avoid shadowing the builtin.
        result = '<class ACE: '
        if self.invert:
            # BUG FIX: the original applied "%" to a placeholder-free format
            # string here, raising TypeError whenever invert was truthy.
            result += 'inverted principal, '
        if self.principal:
            result += 'principal: %s, ' % (self.principal)
        if self.protected:
            result += 'protected, '
        if self.inherited:
            result += 'inherited href: %s, ' % (self.inherited)
        first = 1
        result += 'grantDenies: ['
        for grantDeny in self.grantDenies:
            if first:
                result += '%s' % grantDeny
                first = 0
            else:
                result += ', %s' % grantDeny
        return '%s]>' % (result)

    def copy(self, other):
        '''Copy an ACE object.

        @param other: Another ACE to copy.
        @type other: L{ACE} object

        @raise WebdavError: When an object that is not an L{ACE} is passed
        a L{WebdavError} is raised.
        '''
        if not isinstance(other, ACE):
            raise WebdavError('Non-ACE object passed to copy method: %s.' % other.__class__)
        self.invert = other.invert
        self.protected = other.protected
        self.inherited = other.inherited
        self.principal = Principal()
        if other.principal:
            self.principal.copy(other.principal)
        if other.grantDenies:
            self.addGrantDenies(other.grantDenies)

    def isValid(self):
        """
        Returns true/false (1/0) whether the necessary properties
        (principal and exactly one grant or deny clause) are set.

        @return: Validity of ACE.
        @rtype: C{bool}
        """
        return self.principal and len(self.grantDenies) == 1

    def isGrant(self):
        '''
        Returns true/false (1/0) if ACE contains only grant clauses.

        @return: Value whether the ACE is of grant type.
        @rtype: C{bool}
        '''
        if self.isMixed() or len(self.grantDenies) < 1:
            return 0
        else:
            return self.grantDenies[0].isGrant()

    def isDeny(self):
        '''
        Returns true/false (1/0) if ACE contains only deny clauses.

        @return: Value whether the ACE is of deny type.
        @rtype: C{bool}
        '''
        if self.isMixed() or len(self.grantDenies) < 1:
            return 0
        else:
            return self.grantDenies[0].isDeny()

    def isMixed(self):
        '''
        Returns true/false (1/0) if ACE contains both types (grant and deny) of clauses.

        @return: Value whether the ACE is of mixed (grant and deny) type.
        @rtype: C{bool}
        '''
        mixed = 0
        if len(self.grantDenies):
            first = self.grantDenies[0].grantDeny
            for grantDeny in self.grantDenies:
                if grantDeny.grantDeny != first:
                    mixed = 1
        return mixed

    def toXML(self, defaultNameSpace=None):
        """
        Returns ACE content as a string of valid XML as described in WebDAV ACP.

        @param defaultNameSpace: Name space (default: None).
        @type defaultNameSpace: C(string)
        """
        assert self.isValid(), "ACE is not initialized or does not contain valid content!"

        ACE = 'D:' + Constants.TAG_ACE
        res = self.principal.toXML(self.invert)
        for grantDeny in self.grantDenies:
            res += grantDeny.toXML()
        if self.protected:
            res += '<D:protected/>'
        if self.inherited:
            res += '<D:inherited><D:href>%s</D:href></D:inherited>' % (self.inherited)
        return '<%s>%s</%s>' % (ACE, res, ACE)

    def setPrincipal(self, principal):
        '''
        Sets the passed principal on the ACE.

        @param principal: A principal.
        @type principal: L{Principal} object
        '''
        self.principal = Principal()
        self.principal.copy(principal)

    def setInherited(self, href):
        '''
        Sets the passed URL on the ACE to denote from where it is inherited.

        @param href: A URL.
        @type href: C{string}
        '''
        self.inherited = href

    def addGrantDeny(self, grantDeny):
        '''
        Adds the passed GrantDeny object to list if it's not in it, yet.

        @param grantDeny: A grant or deny clause.
        @type grantDeny: L{GrantDeny} object
        '''
        # only add it if it's not in the list, yet ...
        inList = 0
        for element in self.grantDenies:
            if element == grantDeny:
                inList = 1
        if not inList:
            newGrantDeny = GrantDeny()
            newGrantDeny.copy(grantDeny)
            self.grantDenies.append(newGrantDeny)

    def addGrantDenies(self, grantDenies):
        '''Adds the list of passed grant/deny objects to list.

        @param grantDenies: Grant or deny clauses.
        @type grantDenies: sequence of L{GrantDeny} objects
        '''
        # Plain loop instead of a side-effect-only map() call.
        for grantDeny in grantDenies:
            self.addGrantDeny(grantDeny)

    def delGrantDeny(self, grantDeny):
        '''Deletes the passed GrantDeny object from list.

        @param grantDeny: A grant or deny clause.
        @type grantDeny: L{GrantDeny} object

        @raise WebdavError: A L{WebdavError} is raised if the clause to be
        deleted is not present.
        '''
        # find the clause's (1-based) position, if present ...
        count = 0
        index = 0
        for element in self.grantDenies:
            count += 1
            if element == grantDeny:
                index = count
        if index:
            self.grantDenies.pop(index - 1)
        else:
            raise WebdavError('GrantDeny to be deleted not in list: %s.' % grantDeny)

    def delGrantDenies(self, grantDenies):
        '''Deletes the list of passed grant/deny objects from list.

        @param grantDenies: Grant or deny clauses.
        @type grantDenies: sequence of L{GrantDeny} objects
        '''
        # Plain loop instead of a side-effect-only map() call.
        for grantDeny in grantDenies:
            self.delGrantDeny(grantDeny)
|
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.
Beautiful Soup works with Python 2.7 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.5.3"
__copyright__ = "Copyright (c) 2004-2017 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import traceback
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
# NOTE: the `<>` operator below is intentional -- it is valid Python 2 but a
# SyntaxError on Python 3, so importing this Python 2 package under Python 3
# fails immediately with these two explanatory strings visible in the
# traceback, instead of a confusing error later on.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
    """
    This class defines the basic interface called by the tree builders.

    These methods will be called by the parser:
      reset()
      feed(markup)

    The tree builder may call these methods from its feed() implementation:
      handle_starttag(name, attrs) # See note about return value
      handle_endtag(name)
      handle_data(data) # Appends to the current data node
      endData(containerClass=NavigableString) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """
    # Name of the synthetic root node that contains the whole parse tree.
    ROOT_TAG_NAME = u'[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']

    # space, \n, \t, \f, \r -- the HTML whitespace characters.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

    # Warning issued when no parser is named explicitly; formatted with the
    # caller's filename/line and the auto-selected parser.
    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, exclude_encodings=None,
                 **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        Flow: reject/translate legacy BS3 keyword arguments, pick a tree
        builder from `features` if none was given, warn about common
        beginner mistakes (passing a filename or URL as markup), then try
        each (markup, encoding) candidate from the builder until one
        parses.
        """
        # Legacy BS3 arguments: warn and (where possible) ignore them.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. Suggest you use "
                "features='lxml' for HTML and features='lxml-xml' for "
                "XML.")

        def deprecated_argument(old_name, new_name):
            # Map a renamed BS3 kwarg onto its BS4 equivalent, with a warning.
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        if from_encoding and isinstance(markup, unicode):
            warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
            from_encoding = None

        # Anything left in kwargs is an unknown argument.
        if len(kwargs) > 0:
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)

        if builder is None:
            # Choose a tree builder from the requested features (or the
            # defaults), and warn if the choice was implicit.
            original_features = features
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
            if not (original_features == builder.NAME or
                    original_features in builder.ALTERNATE_NAMES):
                if builder.is_xml:
                    markup_type = "XML"
                else:
                    markup_type = "HTML"
                # Report the caller's location so the warning is actionable.
                caller = traceback.extract_stack()[0]
                filename = caller[0]
                line_number = caller[1]
                warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
                    filename=filename,
                    line_number=line_number,
                    parser=builder.NAME,
                    markup_type=markup_type))

        self.builder = builder
        self.is_xml = builder.is_xml
        self.known_xml = self.is_xml
        self.builder.soup = self

        self.parse_only = parse_only

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256 and (
                (isinstance(markup, bytes) and not b'<' in markup)
                or (isinstance(markup, unicode) and not u'<' in markup)
        ):
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, unicode)
                    and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception, e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                if isinstance(markup, unicode):
                    markup = markup.encode("utf8")
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should'
                    'probably open this file and pass the filehandle into'
                    'Beautiful Soup.' % markup)
            self._check_markup_is_url(markup)

        # Try each (markup, encoding) candidate until one parses.
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                self.builder.prepare_markup(
                    markup, from_encoding, exclude_encodings=exclude_encodings)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None
def __copy__(self):
copy = type(self)(
self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
)
# Although we encoded the tree to UTF-8, that may not have
# been the encoding of the original markup. Set the copy's
# .original_encoding to reflect the original object's
# .original_encoding.
copy.original_encoding = self.original_encoding
return copy
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
d['builder'] = None
return d
@staticmethod
def _check_markup_is_url(markup):
"""
Check if markup looks like it's actually a url and raise a warning
if so. Markup can be unicode or str (py2) / bytes (py3).
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, unicode):
space = u' '
cant_start_with = (u"http:", u"https:")
else:
return
if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
if isinstance(markup, bytes):
decoded_markup = markup.decode('utf-8', 'replace')
else:
decoded_markup = markup
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an'
' HTTP client. You should probably use an HTTP client like'
' requests to get the document behind the URL, and feed'
' that document to Beautiful Soup.' % decoded_markup
)
    def _feed(self):
        """Run self.markup through the tree builder, building the tree."""
        # Convert the document to Unicode.
        self.builder.reset()
        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
    def reset(self):
        """Reset this soup to a pristine state, ready to parse a document."""
        # Re-run Tag's initializer: the soup object doubles as the root tag.
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        self.preserve_whitespace_tag_stack = []
        # The soup itself becomes the bottom of the tag stack.
        self.pushTag(self)
    def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
        """Create a new tag associated with this soup.

        The tag is created detached; it is not attached anywhere in
        the parse tree until the caller inserts it.
        """
        return Tag(None, self.builder, name, namespace, nsprefix, attrs)
    def new_string(self, s, subclass=NavigableString):
        """Create a new NavigableString associated with this soup.

        Pass a different *subclass* to create e.g. a comment or CDATA node.
        """
        return subclass(s)
    def insert_before(self, successor):
        # The soup is the entire document; nothing can precede it.
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
    def insert_after(self, successor):
        # The soup is the entire document; nothing can follow it.
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
    def popTag(self):
        """Pop the top tag off the stack and return the new current tag."""
        tag = self.tagStack.pop()
        # Leaving a whitespace-preserving tag (e.g. <pre>) ends that mode.
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
    def pushTag(self, tag):
        """Attach *tag* to the current tag and make it the new current tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        # Entering a tag like <pre> switches on whitespace preservation.
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Add an object to the parse tree."""
        parent = parent or self.currentTag
        previous_element = most_recent_element or self._most_recent_element
        next_element = previous_sibling = next_sibling = None
        if isinstance(o, Tag):
            # A Tag may already carry link information from earlier
            # parsing; keep it rather than recompute.
            next_element = o.next_element
            next_sibling = o.next_sibling
            previous_sibling = o.previous_sibling
            if not previous_element:
                previous_element = o.previous_element
        o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
        self._most_recent_element = o
        parent.contents.append(o)
        if parent.next_sibling:
            # This node is being inserted into an element that has
            # already been parsed. Deal with any dangling references.
            index = len(parent.contents)-1
            # Locate o within parent.contents by identity (scan from
            # the end, where it was just appended).
            while index >= 0:
                if parent.contents[index] is o:
                    break
                index -= 1
            else:
                raise ValueError(
                    "Error building tree: supposedly %r was inserted "
                    "into %r after the fact, but I don't see it!" % (
                        o, parent
                    )
                )
            # Recompute the element/sibling links around o based on its
            # actual position among parent's children.
            if index == 0:
                previous_element = parent
                previous_sibling = None
            else:
                previous_element = previous_sibling = parent.contents[index-1]
            if index == len(parent.contents)-1:
                next_element = parent.next_sibling
                next_sibling = None
            else:
                next_element = next_sibling = parent.contents[index+1]
            # Patch both directions of every affected link.
            o.previous_element = previous_element
            if previous_element:
                previous_element.next_element = o
            o.next_element = next_element
            if next_element:
                next_element.previous_element = o
            o.next_sibling = next_sibling
            if next_sibling:
                next_sibling.previous_sibling = o
            o.previous_sibling = previous_sibling
            if previous_sibling:
                previous_sibling.next_sibling = o
    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return
        most_recently_popped = None
        stack_size = len(self.tagStack)
        # Walk from the top of the stack down (index 0 is the soup itself,
        # so it is excluded from the scan).
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()
        return most_recently_popped
    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.
        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """
        # print "Start tag %s: %s" % (name, attrs)
        # Flush pending character data before opening a new tag.
        self.endData()
        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None
        tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
                  self.currentTag, self._most_recent_element)
        # NOTE(review): Tag() is not expected to return None here; this
        # guard looks vestigial — confirm before removing.
        if tag is None:
            return tag
        if self._most_recent_element:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag
    def handle_endtag(self, name, nsprefix=None):
        """Close the most recently opened tag matching *name*."""
        #print "End tag: " + name
        # Flush pending character data before closing the tag.
        self.endData()
        self._popToTag(name, nsprefix)
    def handle_data(self, data):
        # Accumulate character data; endData() joins and flushes it.
        self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Aliases to make it easier to type import: 'from bs4 import _soup'
# (or '_s'); both names refer to the BeautifulSoup class.
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser.

    Kept only for backward compatibility; it forces features='xml'
    and warns on every instantiation.
    """
    def __init__(self, *args, **kwargs):
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
    """Exception raised to halt parsing."""
    pass
class FeatureNotFound(ValueError):
    """Raised when no tree builder matches the requested features."""
    pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Parse a document from stdin and pretty-print it.
    # NOTE: `print` statement — this file is Python 2.
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import and_
from sqlalchemy import event
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.db import ipam_non_pluggable_backend
from neutron.db import ipam_pluggable_backend
from neutron.db import models_v2
from neutron.db import rbac_db_models as rbac_db
from neutron.db import sqlalchemyutils
from neutron.extensions import l3
from neutron.i18n import _LE, _LI
from neutron import ipam
from neutron.ipam import subnet_alloc
from neutron import manager
from neutron import neutron_plugin_base_v2
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
# Ports with the following 'device_owner' values will not prevent
# network deletion. If delete_network() finds that all ports on a
# network have these owners, it will explicitly delete each port
# and allow network deletion to continue. Similarly, if delete_subnet()
# finds out that all existing IP Allocations are associated with ports
# with these owners, it will allow subnet deletion to proceed with the
# IP allocations being cleaned up by cascade.
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
def _check_subnet_not_used(context, subnet_id):
    """Fire BEFORE_DELETE callbacks for a subnet.

    Raises SubnetInUse if any registered callback vetoes the deletion.
    """
    try:
        registry.notify(resources.SUBNET, events.BEFORE_DELETE, None,
                        context=context, subnet_id=subnet_id)
    except exceptions.CallbackFailure as e:
        raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e)
class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
neutron_plugin_base_v2.NeutronPluginBaseV2):
"""V2 Neutron plugin interface implementation using SQLAlchemy models.
Whenever a non-read call happens the plugin will call an event handler
class method (e.g., network_created()). The result is that this class
can be sub-classed by other classes that add custom behaviors on certain
events.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
    def __init__(self):
        """Set up the IPAM backend and optional Nova notifications."""
        self.set_ipam_backend()
        if cfg.CONF.notify_nova_on_port_status_changes:
            # Imported lazily so Nova support is optional.
            from neutron.notifiers import nova
            # NOTE(arosen) These event listeners are here to hook into when
            # port status changes and notify nova about their change.
            self.nova_notifier = nova.Notifier()
            event.listen(models_v2.Port, 'after_insert',
                         self.nova_notifier.send_port_status)
            event.listen(models_v2.Port, 'after_update',
                         self.nova_notifier.send_port_status)
            event.listen(models_v2.Port.status, 'set',
                         self.nova_notifier.record_port_status_changed)
def set_ipam_backend(self):
if cfg.CONF.ipam_driver:
self.ipam = ipam_pluggable_backend.IpamPluggableBackend()
else:
self.ipam = ipam_non_pluggable_backend.IpamNonPluggableBackend()
def _validate_host_route(self, route, ip_version):
try:
netaddr.IPNetwork(route['destination'])
netaddr.IPAddress(route['nexthop'])
except netaddr.core.AddrFormatError:
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
except ValueError:
# netaddr.IPAddress would raise this
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
self._validate_ip_version(ip_version, route['nexthop'], 'nexthop')
self._validate_ip_version(ip_version, route['destination'],
'destination')
    def _validate_shared_update(self, context, id, original, updated):
        """Block un-sharing a network that other tenants already use."""
        # The only case that needs to be validated is when 'shared'
        # goes from True to False
        if updated['shared'] == original.shared or updated['shared']:
            return
        # Router gateway and floating-IP ports don't count as usage.
        ports = self._model_query(
            context, models_v2.Port).filter(
                and_(
                    models_v2.Port.network_id == id,
                    models_v2.Port.device_owner !=
                    constants.DEVICE_OWNER_ROUTER_GW,
                    models_v2.Port.device_owner !=
                    constants.DEVICE_OWNER_FLOATINGIP))
        subnets = self._model_query(
            context, models_v2.Subnet).filter(
                models_v2.Subnet.network_id == id)
        tenant_ids = set([port['tenant_id'] for port in ports] +
                         [subnet['tenant_id'] for subnet in subnets])
        # raise if multiple tenants found or if the only tenant found
        # is not the owner of the network
        if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and
            tenant_ids.pop() != original.tenant_id):
            raise n_exc.InvalidSharedSetting(network=original.name)
    def _validate_ipv6_attributes(self, subnet, cur_subnet):
        """Validate ipv6_ra_mode/ipv6_address_mode for create or update."""
        if cur_subnet:
            # Updates only need the DHCP-consistency check.
            self._validate_ipv6_update_dhcp(subnet, cur_subnet)
            return
        ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
        address_mode_set = attributes.is_attr_set(
            subnet.get('ipv6_address_mode'))
        self._validate_ipv6_dhcp(ra_mode_set, address_mode_set,
                                 subnet['enable_dhcp'])
        # When both modes are given they must agree with each other.
        if ra_mode_set and address_mode_set:
            self._validate_ipv6_combination(subnet['ipv6_ra_mode'],
                                            subnet['ipv6_address_mode'])
        if address_mode_set or ra_mode_set:
            self._validate_eui64_applicable(subnet)
def _validate_eui64_applicable(self, subnet):
# Per RFC 4862, section 5.5.3, prefix length and interface
# id together should be equal to 128. Currently neutron supports
# EUI64 interface id only, thus limiting the prefix
# length to be 64 only.
if ipv6_utils.is_auto_address_subnet(subnet):
if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64:
msg = _('Invalid CIDR %s for IPv6 address mode. '
'OpenStack uses the EUI-64 address format, '
'which requires the prefix to be /64.')
raise n_exc.InvalidInput(
error_message=(msg % subnet['cidr']))
    def _validate_ipv6_combination(self, ra_mode, address_mode):
        """Require ra_mode and address_mode to match when both are set."""
        if ra_mode != address_mode:
            msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode "
                    "set to '%(addr_mode)s' is not valid. "
                    "If both attributes are set, they must be the same value"
                    ) % {'ra_mode': ra_mode, 'addr_mode': address_mode}
            raise n_exc.InvalidInput(error_message=msg)
    def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp):
        """Reject IPv6 modes on a subnet with DHCP disabled."""
        if (ra_mode_set or address_mode_set) and not enable_dhcp:
            msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when "
                    "enable_dhcp is set to False.")
            raise n_exc.InvalidInput(error_message=msg)
    def _validate_ipv6_update_dhcp(self, subnet, cur_subnet):
        """Block disabling DHCP while IPv6 modes are (or were) set."""
        if ('enable_dhcp' in subnet and not subnet['enable_dhcp']):
            msg = _("Cannot disable enable_dhcp with "
                    "ipv6 attributes set")
            # Check both the incoming request and the stored subnet:
            # either having a mode set makes the update invalid.
            ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
            address_mode_set = attributes.is_attr_set(
                subnet.get('ipv6_address_mode'))
            if ra_mode_set or address_mode_set:
                raise n_exc.InvalidInput(error_message=msg)
            old_ra_mode_set = attributes.is_attr_set(
                cur_subnet.get('ipv6_ra_mode'))
            old_address_mode_set = attributes.is_attr_set(
                cur_subnet.get('ipv6_address_mode'))
            if old_ra_mode_set or old_address_mode_set:
                raise n_exc.InvalidInput(error_message=msg)
    def _create_bulk(self, resource, context, request_items):
        """Create many resources of one type inside a single transaction.

        :param resource: singular resource name, e.g. 'network'; the
            matching create_<resource>() method is looked up by name.
        :param request_items: request body keyed by the plural name.
        :returns: list of created resource dicts.
        """
        objects = []
        collection = "%ss" % resource
        items = request_items[collection]
        context.session.begin(subtransactions=True)
        try:
            for item in items:
                obj_creator = getattr(self, 'create_%s' % resource)
                objects.append(obj_creator(context, item))
            context.session.commit()
        except Exception:
            context.session.rollback()
            with excutils.save_and_reraise_exception():
                # `item` is the loop variable left over from the failed
                # iteration — it identifies the offending entry.
                LOG.error(_LE("An exception occurred while creating "
                              "the %(resource)s:%(item)s"),
                          {'resource': resource, 'item': item})
        return objects
    def create_network_bulk(self, context, networks):
        """Create multiple networks in one transaction."""
        return self._create_bulk('network', context, networks)
    def create_network(self, context, network):
        """Handle creation of a single network."""
        # single request processing
        n = network['network']
        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
        # unneeded db action if the operation raises
        tenant_id = self._get_tenant_id_for_create(context, n)
        with context.session.begin(subtransactions=True):
            args = {'tenant_id': tenant_id,
                    'id': n.get('id') or uuidutils.generate_uuid(),
                    'name': n['name'],
                    'admin_state_up': n['admin_state_up'],
                    'mtu': n.get('mtu', constants.DEFAULT_NETWORK_MTU),
                    'status': n.get('status', constants.NET_STATUS_ACTIVE)}
            # TODO(pritesh): Move vlan_transparent to the extension module.
            # vlan_transparent here is only added if the vlantransparent
            # extension is enabled.
            if ('vlan_transparent' in n and n['vlan_transparent'] !=
                attributes.ATTR_NOT_SPECIFIED):
                args['vlan_transparent'] = n['vlan_transparent']
            network = models_v2.Network(**args)
            # 'shared' is modeled as an RBAC entry granting access to all
            # tenants ('*'), not as a column on the network itself.
            if n['shared']:
                entry = rbac_db.NetworkRBAC(
                    network=network, action='access_as_shared',
                    target_tenant='*', tenant_id=network['tenant_id'])
                context.session.add(entry)
            context.session.add(network)
        return self._make_network_dict(network, process_extensions=False,
                                       context=context)
    def update_network(self, context, id, network):
        """Update a network, translating 'shared' to/from RBAC entries."""
        n = network['network']
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, id)
            # validate 'shared' parameter
            if 'shared' in n:
                entry = None
                # The network is 'shared' iff a wildcard
                # access_as_shared RBAC entry exists for it.
                for item in network.rbac_entries:
                    if (item.action == 'access_as_shared' and
                            item.target_tenant == '*'):
                        entry = item
                        break
                setattr(network, 'shared', True if entry else False)
                self._validate_shared_update(context, id, network, n)
                # 'shared' is removed from the update dict: it is
                # realized via the RBAC entry below, not as a column.
                update_shared = n.pop('shared')
                if update_shared and not entry:
                    entry = rbac_db.NetworkRBAC(
                        network=network, action='access_as_shared',
                        target_tenant='*', tenant_id=network['tenant_id'])
                    context.session.add(entry)
                elif not update_shared and entry:
                    context.session.delete(entry)
                    context.session.expire(network, ['rbac_entries'])
            network.update(n)
        return self._make_network_dict(network, context=context)
    def delete_network(self, context, id):
        """Delete a network after removing auto-deletable ports.

        :raises: n_exc.NetworkInUse if any non-auto-delete port remains.
        """
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, id)
            # Ports owned by e.g. DHCP agents never block deletion;
            # remove them outright.
            context.session.query(models_v2.Port).filter_by(
                network_id=id).filter(
                    models_v2.Port.device_owner.
                    in_(AUTO_DELETE_PORT_OWNERS)).delete(synchronize_session=False)
            port_in_use = context.session.query(models_v2.Port).filter_by(
                network_id=id).first()
            if port_in_use:
                raise n_exc.NetworkInUse(net_id=id)
            # clean up subnets
            subnets = self._get_subnets_by_network(context, id)
            for subnet in subnets:
                self.delete_subnet(context, subnet['id'])
            context.session.delete(network)
    def get_network(self, context, id, fields=None):
        """Return a single network as a dict, limited to *fields* if given."""
        network = self._get_network(context, id)
        return self._make_network_dict(network, fields, context=context)
    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None,
                     page_reverse=False):
        """List networks with optional filtering, sorting and paging."""
        marker_obj = self._get_marker_obj(context, 'network', limit, marker)
        # Bind the request context into the dict-maker so the generic
        # collection helper can call it with a single argument.
        make_network_dict = functools.partial(self._make_network_dict,
                                              context=context)
        return self._get_collection(context, models_v2.Network,
                                    make_network_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)
    def get_networks_count(self, context, filters=None):
        """Return the number of networks matching *filters*."""
        return self._get_collection_count(context, models_v2.Network,
                                          filters=filters)
    def create_subnet_bulk(self, context, subnets):
        """Create multiple subnets in one transaction."""
        return self._create_bulk('subnet', context, subnets)
def _validate_ip_version(self, ip_version, addr, name):
"""Check IP field of a subnet match specified ip version."""
ip = netaddr.IPNetwork(addr)
if ip.version != ip_version:
data = {'name': name,
'addr': addr,
'ip_version': ip_version}
msg = _("%(name)s '%(addr)s' does not match "
"the ip_version '%(ip_version)s'") % data
raise n_exc.InvalidInput(error_message=msg)
    def _validate_subnet(self, context, s, cur_subnet=None):
        """Validate a subnet spec.

        :param s: subnet spec dict; must contain 'ip_version'.
        :param cur_subnet: existing subnet DB object on update, None
            on create — several checks only apply to updates.
        """
        # This method will validate attributes which may change during
        # create_subnet() and update_subnet().
        # The method requires the subnet spec 's' has 'ip_version' field.
        # If 's' dict does not have 'ip_version' field in an API call
        # (e.g., update_subnet()), you need to set 'ip_version' field
        # before calling this method.
        ip_ver = s['ip_version']
        if attributes.is_attr_set(s.get('cidr')):
            self._validate_ip_version(ip_ver, s['cidr'], 'cidr')
        # TODO(watanabe.isao): After we found a way to avoid the re-sync
        # from the agent side, this restriction could be removed.
        if cur_subnet:
            dhcp_was_enabled = cur_subnet.enable_dhcp
        else:
            dhcp_was_enabled = False
        # Only check prefix/DHCP compatibility when DHCP is being
        # turned on (not when it was already enabled).
        if s.get('enable_dhcp') and not dhcp_was_enabled:
            subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen
            error_message = _("Subnet has a prefix length that is "
                              "incompatible with DHCP service enabled.")
            if ((ip_ver == 4 and subnet_prefixlen > 30) or
                (ip_ver == 6 and subnet_prefixlen > 126)):
                raise n_exc.InvalidInput(error_message=error_message)
            else:
                # NOTE(watanabe.isao): The following restriction is necessary
                # only when updating subnet.
                if cur_subnet:
                    range_qry = context.session.query(models_v2.
                        IPAvailabilityRange).join(models_v2.IPAllocationPool)
                    ip_range = range_qry.filter_by(subnet_id=s['id']).first()
                    if not ip_range:
                        raise n_exc.IpAddressGenerationFailure(
                            net_id=cur_subnet.network_id)
        if attributes.is_attr_set(s.get('gateway_ip')):
            self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
            if (cfg.CONF.force_gateway_on_subnet and
                not ipam.utils.check_gateway_in_subnet(
                    s['cidr'], s['gateway_ip'])):
                error_message = _("Gateway is not valid on subnet")
                raise n_exc.InvalidInput(error_message=error_message)
            # Ensure the gateway IP is not assigned to any port
            # skip this check in case of create (s parameter won't have id)
            # NOTE(salv-orlando): There is slight chance of a race, when
            # a subnet-update and a router-interface-add operation are
            # executed concurrently
            if cur_subnet and not ipv6_utils.is_ipv6_pd_enabled(s):
                alloc_qry = context.session.query(models_v2.IPAllocation)
                allocated = alloc_qry.filter_by(
                    ip_address=cur_subnet['gateway_ip'],
                    subnet_id=cur_subnet['id']).first()
                if allocated and allocated['port_id']:
                    raise n_exc.GatewayIpInUse(
                        ip_address=cur_subnet['gateway_ip'],
                        port_id=allocated['port_id'])
        if attributes.is_attr_set(s.get('dns_nameservers')):
            if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
                raise n_exc.DNSNameServersExhausted(
                    subnet_id=s.get('id', _('new subnet')),
                    quota=cfg.CONF.max_dns_nameservers)
            for dns in s['dns_nameservers']:
                try:
                    netaddr.IPAddress(dns)
                except Exception:
                    raise n_exc.InvalidInput(
                        error_message=(_("Error parsing dns address %s") %
                                       dns))
                self._validate_ip_version(ip_ver, dns, 'dns_nameserver')
        if attributes.is_attr_set(s.get('host_routes')):
            if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes:
                raise n_exc.HostRoutesExhausted(
                    subnet_id=s.get('id', _('new subnet')),
                    quota=cfg.CONF.max_subnet_host_routes)
            # check if the routes are all valid
            for rt in s['host_routes']:
                self._validate_host_route(rt, ip_ver)
        # IPv6-only attributes are rejected on IPv4 subnets.
        if ip_ver == 4:
            if attributes.is_attr_set(s.get('ipv6_ra_mode')):
                raise n_exc.InvalidInput(
                    error_message=(_("ipv6_ra_mode is not valid when "
                                     "ip_version is 4")))
            if attributes.is_attr_set(s.get('ipv6_address_mode')):
                raise n_exc.InvalidInput(
                    error_message=(_("ipv6_address_mode is not valid when "
                                     "ip_version is 4")))
        if ip_ver == 6:
            self._validate_ipv6_attributes(s, cur_subnet)
    def _validate_subnet_for_pd(self, subnet):
        """Validates that subnet parameters are correct for IPv6 PD"""
        if (subnet.get('ip_version') != constants.IP_VERSION_6):
            reason = _("Prefix Delegation can only be used with IPv6 "
                       "subnets.")
            raise n_exc.BadRequest(resource='subnets', msg=reason)
        # ATTR_NOT_SPECIFIED is accepted: the mode may simply be unset.
        mode_list = [constants.IPV6_SLAAC,
                     constants.DHCPV6_STATELESS,
                     attributes.ATTR_NOT_SPECIFIED]
        ra_mode = subnet.get('ipv6_ra_mode')
        if ra_mode not in mode_list:
            reason = _("IPv6 RA Mode must be SLAAC or Stateless for "
                       "Prefix Delegation.")
            raise n_exc.BadRequest(resource='subnets', msg=reason)
        address_mode = subnet.get('ipv6_address_mode')
        if address_mode not in mode_list:
            reason = _("IPv6 Address Mode must be SLAAC or Stateless for "
                       "Prefix Delegation.")
            raise n_exc.BadRequest(resource='subnets', msg=reason)
    def _update_router_gw_ports(self, context, network, subnet):
        """Add a fixed IP from a new external subnet to router gateways.

        For every router whose gateway port is on *network*, append a
        fixed IP on *subnet* unless the router already has a stateful
        fixed IP of the same IP version.
        """
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin:
            gw_ports = self._get_router_gw_ports_by_network(context,
                                                            network['id'])
            router_ids = [p['device_id'] for p in gw_ports]
            ctx_admin = context.elevated()
            ext_subnets_dict = {s['id']: s for s in network['subnets']}
            for id in router_ids:
                router = l3plugin.get_router(ctx_admin, id)
                external_gateway_info = router['external_gateway_info']
                # Get all stateful (i.e. non-SLAAC/DHCPv6-stateless) fixed ips
                fips = [f for f in external_gateway_info['external_fixed_ips']
                        if not ipv6_utils.is_auto_address_subnet(
                            ext_subnets_dict[f['subnet_id']])]
                num_fips = len(fips)
                # Don't add the fixed IP to the port if it already
                # has a stateful fixed IP of the same IP version
                if num_fips > 1:
                    continue
                if num_fips == 1 and netaddr.IPAddress(
                        fips[0]['ip_address']).version == subnet['ip_version']:
                    continue
                external_gateway_info['external_fixed_ips'].append(
                                             {'subnet_id': subnet['id']})
                info = {'router': {'external_gateway_info':
                    external_gateway_info}}
                l3plugin.update_router(context, id, info)
    def _create_subnet(self, context, subnet, subnetpool_id):
        """Allocate and persist a subnet, then propagate it to routers
        and auto-addressed ports as needed."""
        s = subnet['subnet']
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, s["network_id"])
            subnet, ipam_subnet = self.ipam.allocate_subnet(context,
                                                            network,
                                                            s,
                                                            subnetpool_id)
        # External networks get the new subnet added to router gateways.
        if hasattr(network, 'external') and network.external:
            self._update_router_gw_ports(context,
                                         network,
                                         subnet)
        # If this subnet supports auto-addressing, then update any
        # internal ports on the network with addresses for this subnet.
        if ipv6_utils.is_auto_address_subnet(subnet):
            self.ipam.add_auto_addrs_on_network_ports(context, subnet,
                                                      ipam_subnet)
        return self._make_subnet_dict(subnet, context=context)
    def _get_subnetpool_id(self, subnet):
        """Returns the subnetpool id for this request

        If the pool id was explicitly set in the request then that will be
        returned, even if it is None.

        Otherwise, the default pool for the IP version requested will be
        returned. This will either be a pool id or None (the default for each
        configuration parameter). This implies that the ip version must be
        either set implicitly with a specific cidr or explicitly using
        ip_version attribute.

        :param subnet: The subnet dict from the request
        """
        subnetpool_id = subnet.get('subnetpool_id',
                                   attributes.ATTR_NOT_SPECIFIED)
        if subnetpool_id != attributes.ATTR_NOT_SPECIFIED:
            return subnetpool_id
        # Derive the IP version from the cidr when possible; otherwise
        # an explicit ip_version is mandatory.
        cidr = subnet.get('cidr')
        if attributes.is_attr_set(cidr):
            ip_version = netaddr.IPNetwork(cidr).version
        else:
            ip_version = subnet.get('ip_version')
            if not attributes.is_attr_set(ip_version):
                msg = _('ip_version must be specified in the absence of '
                        'cidr and subnetpool_id')
                raise n_exc.BadRequest(resource='subnets', msg=msg)
        if ip_version == 4:
            return cfg.CONF.default_ipv4_subnet_pool
        return cfg.CONF.default_ipv6_subnet_pool
    def create_subnet(self, context, subnet):
        """Validate a subnet creation request and delegate allocation.

        Exactly one of cidr/prefixlen may be supplied; a cidr-less
        request requires a subnet pool.
        """
        s = subnet['subnet']
        cidr = s.get('cidr', attributes.ATTR_NOT_SPECIFIED)
        prefixlen = s.get('prefixlen', attributes.ATTR_NOT_SPECIFIED)
        has_cidr = attributes.is_attr_set(cidr)
        has_prefixlen = attributes.is_attr_set(prefixlen)

        if has_cidr and has_prefixlen:
            msg = _('cidr and prefixlen must not be supplied together')
            raise n_exc.BadRequest(resource='subnets', msg=msg)

        if has_cidr:
            # turn the CIDR into a proper subnet
            net = netaddr.IPNetwork(s['cidr'])
            subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)

        s['tenant_id'] = self._get_tenant_id_for_create(context, s)
        subnetpool_id = self._get_subnetpool_id(s)
        if subnetpool_id:
            self.ipam.validate_pools_with_subnetpool(s)
            if subnetpool_id == constants.IPV6_PD_POOL_ID:
                if has_cidr:
                    # We do not currently support requesting a specific
                    # cidr with IPv6 prefix delegation. Set the subnetpool_id
                    # to None and allow the request to continue as normal.
                    subnetpool_id = None
                    self._validate_subnet(context, s)
                else:
                    # PD subnets get a provisional cidr until a prefix
                    # is actually delegated.
                    prefix = constants.PROVISIONAL_IPV6_PD_PREFIX
                    subnet['subnet']['cidr'] = prefix
                    self._validate_subnet_for_pd(s)
        else:
            if not has_cidr:
                msg = _('A cidr must be specified in the absence of a '
                        'subnet pool')
                raise n_exc.BadRequest(resource='subnets', msg=msg)
            self._validate_subnet(context, s)

        return self._create_subnet(context, subnet, subnetpool_id)
    def _update_allocation_pools(self, subnet):
        """Gets new allocation pools and formats them correctly"""
        allocation_pools = self.ipam.generate_pools(subnet['cidr'],
                                                    subnet['gateway_ip'])
        # Convert each netaddr range into the {'start', 'end'} dict
        # form used by the API/DB layer.
        return [{'start': str(netaddr.IPAddress(p.first,
                                                subnet['ip_version'])),
                 'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))}
                for p in allocation_pools]
    def update_subnet(self, context, id, subnet):
        """Update the subnet with new info.

        The change however will not be realized until the client renew the
        dns lease or we support gratuitous DHCP offers
        """
        s = subnet['subnet']
        new_cidr = s.get('cidr')
        db_subnet = self._get_subnet(context, id)
        # Fill 'ip_version' and 'allocation_pools' fields with the current
        # value since _validate_subnet() expects subnet spec has 'ip_version'
        # and 'allocation_pools' fields.
        s['ip_version'] = db_subnet.ip_version
        s['cidr'] = db_subnet.cidr
        s['id'] = db_subnet.id
        s['tenant_id'] = db_subnet.tenant_id
        s['subnetpool_id'] = db_subnet.subnetpool_id
        self._validate_subnet(context, s, cur_subnet=db_subnet)
        db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip'])
                    for p in db_subnet.allocation_pools]

        update_ports_needed = False
        if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s):
            # This is an ipv6 prefix delegation-enabled subnet being given an
            # updated cidr by the process_prefix_update RPC
            s['cidr'] = new_cidr
            update_ports_needed = True
            net = netaddr.IPNetwork(s['cidr'], s['ip_version'])
            # Update gateway_ip and allocation pools based on new cidr
            s['gateway_ip'] = utils.get_first_host_ip(net, s['ip_version'])
            s['allocation_pools'] = self._update_allocation_pools(s)

        range_pools = None
        if s.get('allocation_pools') is not None:
            # Convert allocation pools to IPRange to simplify future checks
            range_pools = self.ipam.pools_to_ip_range(s['allocation_pools'])
            self.ipam.validate_allocation_pools(range_pools, s['cidr'])
            s['allocation_pools'] = range_pools

        # If either gateway_ip or allocation_pools were specified
        gateway_ip = s.get('gateway_ip')
        if gateway_ip is not None or s.get('allocation_pools') is not None:
            if gateway_ip is None:
                gateway_ip = db_subnet.gateway_ip
            pools = range_pools if range_pools is not None else db_pools
            self.ipam.validate_gw_out_of_pools(gateway_ip, pools)

        with context.session.begin(subtransactions=True):
            subnet, changes = self.ipam.update_db_subnet(context, id, s,
                                                         db_pools)
        result = self._make_subnet_dict(subnet, context=context)
        # Keep up with fields that changed
        result.update(changes)

        if update_ports_needed:
            # Find ports that have not yet been updated
            # with an IP address by Prefix Delegation, and update them
            ports = self.get_ports(context)
            routers = []
            for port in ports:
                fixed_ips = []
                new_port = {'port': port}
                for ip in port['fixed_ips']:
                    if ip['subnet_id'] == s['id']:
                        fixed_ip = {'subnet_id': s['id']}
                        # Router interfaces get the subnet's gateway IP;
                        # the router is then flagged for an l3 update.
                        if "router_interface" in port['device_owner']:
                            routers.append(port['device_id'])
                            fixed_ip['ip_address'] = s['gateway_ip']
                        fixed_ips.append(fixed_ip)
                if fixed_ips:
                    new_port['port']['fixed_ips'] = fixed_ips
                    self.update_port(context, port['id'], new_port)

            # Send router_update to l3_agent
            if routers:
                l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
                l3_rpc_notifier.routers_updated(context, routers)

        return result
def _subnet_check_ip_allocations(self, context, subnet_id):
return (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet_id).join(models_v2.Port).first())
def _subnet_get_user_allocation(self, context, subnet_id):
"""Check if there are any user ports on subnet and return first."""
# need to join with ports table as IPAllocation's port
# is not joined eagerly and thus producing query which yields
# incorrect results
return (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet_id).join(models_v2.Port).
filter(~models_v2.Port.device_owner.
in_(AUTO_DELETE_PORT_OWNERS)).first())
def _subnet_check_ip_allocations_internal_router_ports(self, context,
subnet_id):
# Do not delete the subnet if IP allocations for internal
# router ports still exist
allocs = context.session.query(models_v2.IPAllocation).filter_by(
subnet_id=subnet_id).join(models_v2.Port).filter(
models_v2.Port.device_owner.in_(
constants.ROUTER_INTERFACE_OWNERS)
).first()
if allocs:
LOG.debug("Subnet %s still has internal router ports, "
"cannot delete", subnet_id)
raise n_exc.SubnetInUse(subnet_id=id)
    def delete_subnet(self, context, id):
        """Delete a subnet after clearing network-owned allocations.

        Raises SubnetInUse if user ports (or, for auto-address subnets,
        internal router ports) still hold allocations on the subnet.
        """
        with context.session.begin(subtransactions=True):
            subnet = self._get_subnet(context, id)
            # Make sure the subnet isn't used by other resources
            _check_subnet_not_used(context, id)
            # Delete all network owned ports
            qry_network_ports = (
                context.session.query(models_v2.IPAllocation).
                filter_by(subnet_id=subnet['id']).
                join(models_v2.Port))
            # Remove network owned ports, and delete IP allocations
            # for IPv6 addresses which were automatically generated
            # via SLAAC
            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
            if is_auto_addr_subnet:
                # SLAAC subnets: every allocation is auto-generated, so only
                # internal router ports can block deletion.
                self._subnet_check_ip_allocations_internal_router_ports(
                    context, id)
            else:
                qry_network_ports = (
                    qry_network_ports.filter(models_v2.Port.device_owner.
                                             in_(AUTO_DELETE_PORT_OWNERS)))
            network_ports = qry_network_ports.all()
            if network_ports:
                for port in network_ports:
                    context.session.delete(port)
            # Check if there are more IP allocations, unless
            # is_auto_address_subnet is True. In that case the check is
            # unnecessary. This additional check not only would be wasteful
            # for this class of subnet, but is also error-prone since when
            # the isolation level is set to READ COMMITTED allocations made
            # concurrently will be returned by this query
            if not is_auto_addr_subnet:
                alloc = self._subnet_check_ip_allocations(context, id)
                if alloc:
                    LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP "
                                 "allocation on subnet "
                                 "%(subnet)s, cannot delete"),
                             {'ip': alloc.ip_address,
                              'port_id': alloc.port_id,
                              'subnet': id})
                    raise n_exc.SubnetInUse(subnet_id=id)
            context.session.delete(subnet)
            # Delete related ipam subnet manually,
            # since there is no FK relationship
            self.ipam.delete_subnet(context, id)
def get_subnet(self, context, id, fields=None):
subnet = self._get_subnet(context, id)
return self._make_subnet_dict(subnet, fields, context=context)
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
return self._get_subnets(context, filters, fields, sorts, limit,
marker, page_reverse)
def get_subnets_count(self, context, filters=None):
return self._get_collection_count(context, models_v2.Subnet,
filters=filters)
def get_subnets_by_network(self, context, network_id):
return [self._make_subnet_dict(subnet_db) for subnet_db in
self._get_subnets_by_network(context, network_id)]
def _create_subnetpool_prefix(self, context, cidr, subnetpool_id):
prefix_args = {'cidr': cidr, 'subnetpool_id': subnetpool_id}
subnetpool_prefix = models_v2.SubnetPoolPrefix(**prefix_args)
context.session.add(subnetpool_prefix)
def _validate_address_scope_id(self, context, address_scope_id,
subnetpool_id, sp_prefixes):
"""Validate the address scope before associating.
Subnetpool can associate with an address scope if
- the tenant user is the owner of both the subnetpool and
address scope
- the admin is associating the subnetpool with the shared
address scope
- there is no prefix conflict with the existing subnetpools
associated with the address scope.
"""
if not attributes.is_attr_set(address_scope_id):
return
if not self.is_address_scope_owned_by_tenant(context,
address_scope_id):
raise n_exc.IllegalSubnetPoolAssociationToAddressScope(
subnetpool_id=subnetpool_id, address_scope_id=address_scope_id)
subnetpools = self._get_subnetpools_by_address_scope_id(
context, address_scope_id)
new_set = netaddr.IPSet(sp_prefixes)
for sp in subnetpools:
if sp.id == subnetpool_id:
continue
sp_set = netaddr.IPSet([prefix['cidr'] for prefix in sp.prefixes])
if sp_set.intersection(new_set):
raise n_exc.AddressScopePrefixConflict()
def _check_subnetpool_update_allowed(self, context, subnetpool_id,
address_scope_id):
"""Check if the subnetpool can be updated or not.
If the subnetpool is associated to a shared address scope not owned
by the tenant, then the subnetpool cannot be updated.
"""
if not self.is_address_scope_owned_by_tenant(context,
address_scope_id):
msg = _("subnetpool %(subnetpool_id)s cannot be updated when"
" associated with shared address scope "
"%(address_scope_id)s") % {
'subnetpool_id': subnetpool_id,
'address_scope_id': address_scope_id}
raise n_exc.IllegalSubnetPoolUpdate(reason=msg)
def create_subnetpool(self, context, subnetpool):
"""Create a subnetpool"""
sp = subnetpool['subnetpool']
sp_reader = subnet_alloc.SubnetPoolReader(sp)
if sp_reader.address_scope_id is attributes.ATTR_NOT_SPECIFIED:
sp_reader.address_scope_id = None
self._validate_address_scope_id(context, sp_reader.address_scope_id,
id, sp_reader.prefixes)
tenant_id = self._get_tenant_id_for_create(context, sp)
with context.session.begin(subtransactions=True):
pool_args = {'tenant_id': tenant_id,
'id': sp_reader.id,
'name': sp_reader.name,
'ip_version': sp_reader.ip_version,
'default_prefixlen':
sp_reader.default_prefixlen,
'min_prefixlen': sp_reader.min_prefixlen,
'max_prefixlen': sp_reader.max_prefixlen,
'shared': sp_reader.shared,
'default_quota': sp_reader.default_quota,
'address_scope_id': sp_reader.address_scope_id}
subnetpool = models_v2.SubnetPool(**pool_args)
context.session.add(subnetpool)
for prefix in sp_reader.prefixes:
self._create_subnetpool_prefix(context,
prefix,
subnetpool.id)
return self._make_subnetpool_dict(subnetpool)
def _update_subnetpool_prefixes(self, context, prefix_list, id):
with context.session.begin(subtransactions=True):
context.session.query(models_v2.SubnetPoolPrefix).filter_by(
subnetpool_id=id).delete()
for prefix in prefix_list:
model_prefix = models_v2.SubnetPoolPrefix(cidr=prefix,
subnetpool_id=id)
context.session.add(model_prefix)
def _updated_subnetpool_dict(self, model, new_pool):
updated = {}
new_prefixes = new_pool.get('prefixes', attributes.ATTR_NOT_SPECIFIED)
orig_prefixes = [str(x.cidr) for x in model['prefixes']]
if new_prefixes is not attributes.ATTR_NOT_SPECIFIED:
orig_set = netaddr.IPSet(orig_prefixes)
new_set = netaddr.IPSet(new_prefixes)
if not orig_set.issubset(new_set):
msg = _("Existing prefixes must be "
"a subset of the new prefixes")
raise n_exc.IllegalSubnetPoolPrefixUpdate(msg=msg)
new_set.compact()
updated['prefixes'] = [str(x.cidr) for x in new_set.iter_cidrs()]
else:
updated['prefixes'] = orig_prefixes
for key in ['id', 'name', 'ip_version', 'min_prefixlen',
'max_prefixlen', 'default_prefixlen', 'shared',
'default_quota', 'address_scope_id']:
self._write_key(key, updated, model, new_pool)
return updated
def _write_key(self, key, update, orig, new_dict):
new_val = new_dict.get(key, attributes.ATTR_NOT_SPECIFIED)
if new_val is not attributes.ATTR_NOT_SPECIFIED:
update[key] = new_dict[key]
else:
update[key] = orig[key]
def update_subnetpool(self, context, id, subnetpool):
"""Update a subnetpool"""
new_sp = subnetpool['subnetpool']
with context.session.begin(subtransactions=True):
orig_sp = self._get_subnetpool(context, id)
updated = self._updated_subnetpool_dict(orig_sp, new_sp)
updated['tenant_id'] = orig_sp.tenant_id
reader = subnet_alloc.SubnetPoolReader(updated)
if orig_sp.address_scope_id:
self._check_subnetpool_update_allowed(context, id,
orig_sp.address_scope_id)
self._validate_address_scope_id(context, reader.address_scope_id,
id, reader.prefixes)
orig_sp.update(self._filter_non_model_columns(
reader.subnetpool,
models_v2.SubnetPool))
self._update_subnetpool_prefixes(context,
reader.prefixes,
id)
for key in ['min_prefixlen', 'max_prefixlen', 'default_prefixlen']:
updated['key'] = str(updated[key])
return updated
def get_subnetpool(self, context, id, fields=None):
"""Retrieve a subnetpool."""
subnetpool = self._get_subnetpool(context, id)
return self._make_subnetpool_dict(subnetpool, fields)
def get_subnetpools(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieve list of subnetpools."""
marker_obj = self._get_marker_obj(context, 'subnetpool', limit, marker)
collection = self._get_collection(context, models_v2.SubnetPool,
self._make_subnetpool_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
return collection
def delete_subnetpool(self, context, id):
"""Delete a subnetpool."""
with context.session.begin(subtransactions=True):
subnetpool = self._get_subnetpool(context, id)
subnets = self._get_subnets_by_subnetpool(context, id)
if subnets:
reason = _("Subnet pool has existing allocations")
raise n_exc.SubnetPoolDeleteError(reason=reason)
context.session.delete(subnetpool)
def _check_mac_addr_update(self, context, port, new_mac, device_owner):
if (device_owner and device_owner.startswith('network:')):
raise n_exc.UnsupportedPortDeviceOwner(
op=_("mac address update"), port_id=id,
device_owner=device_owner)
def create_port_bulk(self, context, ports):
return self._create_bulk('port', context, ports)
def _create_port_with_mac(self, context, network_id, port_data,
mac_address):
try:
# since this method could either be used within or outside the
# transaction, use convenience method to avoid passing a flag
with db_api.autonested_transaction(context.session):
db_port = models_v2.Port(mac_address=mac_address, **port_data)
context.session.add(db_port)
return db_port
except db_exc.DBDuplicateEntry:
raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address)
def _create_port(self, context, network_id, port_data):
max_retries = cfg.CONF.mac_generation_retries
for i in range(max_retries):
mac = self._generate_mac()
try:
return self._create_port_with_mac(
context, network_id, port_data, mac)
except n_exc.MacAddressInUse:
LOG.debug('Generated mac %(mac_address)s exists on '
'network %(network_id)s',
{'mac_address': mac, 'network_id': network_id})
LOG.error(_LE("Unable to generate mac address after %s attempts"),
max_retries)
raise n_exc.MacAddressGenerationFailure(net_id=network_id)
    def create_port(self, context, port):
        """Create a port on the requested network.

        Generates the port id and MAC address when the caller did not
        supply them, then delegates IP allocation to the IPAM driver
        inside a single transaction.
        """
        p = port['port']
        port_id = p.get('id') or uuidutils.generate_uuid()
        network_id = p['network_id']
        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
        # unneeded db action if the operation raises
        tenant_id = self._get_tenant_id_for_create(context, p)
        if p.get('device_owner'):
            self._enforce_device_owner_not_router_intf_or_device_id(
                context, p.get('device_owner'), p.get('device_id'), tenant_id)
        port_data = dict(tenant_id=tenant_id,
                         name=p['name'],
                         id=port_id,
                         network_id=network_id,
                         admin_state_up=p['admin_state_up'],
                         status=p.get('status', constants.PORT_STATUS_ACTIVE),
                         device_id=p['device_id'],
                         device_owner=p['device_owner'])
        with context.session.begin(subtransactions=True):
            # Ensure that the network exists.
            self._get_network(context, network_id)
            # Create the port
            if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED:
                # No MAC supplied: let _create_port generate one (retrying
                # on collisions) and report it back to the caller's dict.
                db_port = self._create_port(context, network_id, port_data)
                p['mac_address'] = db_port['mac_address']
            else:
                db_port = self._create_port_with_mac(
                    context, network_id, port_data, p['mac_address'])
            self.ipam.allocate_ips_for_port_and_store(context, port, port_id)
        return self._make_port_dict(db_port, process_extensions=False)
    def _validate_port_for_update(self, context, db_port, new_port, new_mac):
        """Enforce ownership and MAC-update constraints before an update."""
        changed_owner = 'device_owner' in new_port
        current_owner = (new_port.get('device_owner') or
                         db_port['device_owner'])
        changed_device_id = new_port.get('device_id') != db_port['device_id']
        current_device_id = new_port.get('device_id') or db_port['device_id']
        # NOTE: binds as (current_owner and changed_device_id) or
        # changed_owner -- any owner change, or a device_id change on an
        # owned port, triggers the router-ownership check below.
        if current_owner and changed_device_id or changed_owner:
            self._enforce_device_owner_not_router_intf_or_device_id(
                context, current_owner, current_device_id,
                db_port['tenant_id'])
        if new_mac and new_mac != db_port['mac_address']:
            self._check_mac_addr_update(context, db_port,
                                        new_mac, current_owner)
def update_port(self, context, id, port):
new_port = port['port']
with context.session.begin(subtransactions=True):
port = self._get_port(context, id)
new_mac = new_port.get('mac_address')
self._validate_port_for_update(context, port, new_port, new_mac)
changes = self.ipam.update_port_with_ips(context, port,
new_port, new_mac)
result = self._make_port_dict(port)
# Keep up with fields that changed
if changes.original or changes.add or changes.remove:
result['fixed_ips'] = self._make_fixed_ip_dict(
changes.original + changes.add)
return result
def delete_port(self, context, id):
with context.session.begin(subtransactions=True):
self.ipam.delete_port(context, id)
def delete_ports_by_device_id(self, context, device_id, network_id=None):
query = (context.session.query(models_v2.Port.id)
.enable_eagerloads(False)
.filter(models_v2.Port.device_id == device_id))
if network_id:
query = query.filter(models_v2.Port.network_id == network_id)
port_ids = [p[0] for p in query]
for port_id in port_ids:
try:
self.delete_port(context, port_id)
except n_exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
"The port has already been deleted.",
port_id)
def get_port(self, context, id, fields=None):
port = self._get_port(context, id)
return self._make_port_dict(port, fields)
    def _get_ports_query(self, context, filters=None, sorts=None, limit=None,
                         marker_obj=None, page_reverse=False):
        """Build the (optionally paginated) port query for ``filters``.

        fixed_ips filters (ip_address / subnet_id) require a join with the
        IPAllocation table and are therefore popped out and handled apart
        from the generic column filters.
        """
        Port = models_v2.Port
        IPAllocation = models_v2.IPAllocation
        if not filters:
            filters = {}
        query = self._model_query(context, Port)
        fixed_ips = filters.pop('fixed_ips', {})
        ip_addresses = fixed_ips.get('ip_address')
        subnet_ids = fixed_ips.get('subnet_id')
        if ip_addresses or subnet_ids:
            query = query.join(Port.fixed_ips)
            if ip_addresses:
                query = query.filter(IPAllocation.ip_address.in_(ip_addresses))
            if subnet_ids:
                query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
        query = self._apply_filters_to_query(query, Port, filters, context)
        if limit and page_reverse and sorts:
            # Walk the page backwards by inverting every sort direction;
            # get_ports() re-reverses the materialized results.
            sorts = [(s[0], not s[1]) for s in sorts]
        query = sqlalchemyutils.paginate_query(query, Port, limit,
                                               sorts, marker_obj)
        return query
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'port', limit, marker)
query = self._get_ports_query(context, filters=filters,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def get_ports_count(self, context, filters=None):
return self._get_ports_query(context, filters).count()
    def _enforce_device_owner_not_router_intf_or_device_id(self, context,
                                                           device_owner,
                                                           device_id,
                                                           tenant_id):
        """Prevent tenants from replacing the device id of router ports with
        a router uuid belonging to another tenant.
        """
        if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
            return
        if not context.is_admin:
            # check to make sure device_id does not match another tenants
            # router.
            if device_id:
                if hasattr(self, 'get_router'):
                    # This plugin implements L3 itself; query it directly.
                    try:
                        ctx_admin = context.elevated()
                        router = self.get_router(ctx_admin, device_id)
                    except l3.RouterNotFound:
                        return
                else:
                    # Otherwise look the router up via the separately
                    # registered L3 service plugin, if one exists.
                    l3plugin = (
                        manager.NeutronManager.get_service_plugins().get(
                            service_constants.L3_ROUTER_NAT))
                    if l3plugin:
                        try:
                            ctx_admin = context.elevated()
                            router = l3plugin.get_router(ctx_admin,
                                                         device_id)
                        except l3.RouterNotFound:
                            return
                    else:
                        # raise as extension doesn't support L3 anyways.
                        raise n_exc.DeviceIDNotOwnedByTenant(
                            device_id=device_id)
                if tenant_id != router['tenant_id']:
                    raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
|
|
from datetime import date
from django.forms import DateField, Form, SelectDateWidget
from django.test import override_settings
from django.utils import translation
from django.utils.dates import MONTHS_AP
from .base import WidgetTest
class SelectDateWidgetTest(WidgetTest):
    """Rendering, localization and validation tests for SelectDateWidget."""
    # Show full diffs for the large HTML comparisons below.
    maxDiff = None
    widget = SelectDateWidget(
        years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'),
    )

    def test_render_empty(self):
        """An empty value renders the '---' placeholder in all selects."""
        self.check_html(self.widget, 'mydate', '', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
                <option value="0">---</option>
                <option value="1">January</option>
                <option value="2">February</option>
                <option value="3">March</option>
                <option value="4">April</option>
                <option value="5">May</option>
                <option value="6">June</option>
                <option value="7">July</option>
                <option value="8">August</option>
                <option value="9">September</option>
                <option value="10">October</option>
                <option value="11">November</option>
                <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
                <option value="0">---</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13">13</option>
                <option value="14">14</option>
                <option value="15">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
                <option value="0">---</option>
                <option value="2007">2007</option>
                <option value="2008">2008</option>
                <option value="2009">2009</option>
                <option value="2010">2010</option>
                <option value="2011">2011</option>
                <option value="2012">2012</option>
                <option value="2013">2013</option>
                <option value="2014">2014</option>
                <option value="2015">2015</option>
                <option value="2016">2016</option>
            </select>
            """
        ))

    def test_render_none(self):
        """
        Rendering the None or '' values should yield the same output.
        """
        self.assertHTMLEqual(
            self.widget.render('mydate', None),
            self.widget.render('mydate', ''),
        )

    def test_render_string(self):
        """An ISO date string pre-selects the matching options."""
        self.check_html(self.widget, 'mydate', '2010-04-15', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
                <option value="0">---</option>
                <option value="1">January</option>
                <option value="2">February</option>
                <option value="3">March</option>
                <option value="4" selected="selected">April</option>
                <option value="5">May</option>
                <option value="6">June</option>
                <option value="7">July</option>
                <option value="8">August</option>
                <option value="9">September</option>
                <option value="10">October</option>
                <option value="11">November</option>
                <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
                <option value="0">---</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13">13</option>
                <option value="14">14</option>
                <option value="15" selected="selected">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
                <option value="0">---</option>
                <option value="2007">2007</option>
                <option value="2008">2008</option>
                <option value="2009">2009</option>
                <option value="2010" selected="selected">2010</option>
                <option value="2011">2011</option>
                <option value="2012">2012</option>
                <option value="2013">2013</option>
                <option value="2014">2014</option>
                <option value="2015">2015</option>
                <option value="2016">2016</option>
            </select>
            """
        ))

    def test_render_datetime(self):
        """A date object renders identically to its ISO string form."""
        self.assertHTMLEqual(
            self.widget.render('mydate', date(2010, 4, 15)),
            self.widget.render('mydate', '2010-04-15'),
        )

    def test_render_invalid_date(self):
        """
        Invalid dates should still render the failed date.
        """
        self.check_html(self.widget, 'mydate', '2010-02-31', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
                <option value="0">---</option>
                <option value="1">January</option>
                <option value="2" selected="selected">February</option>
                <option value="3">March</option>
                <option value="4">April</option>
                <option value="5">May</option>
                <option value="6">June</option>
                <option value="7">July</option>
                <option value="8">August</option>
                <option value="9">September</option>
                <option value="10">October</option>
                <option value="11">November</option>
                <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
                <option value="0">---</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13">13</option>
                <option value="14">14</option>
                <option value="15">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31" selected="selected">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
                <option value="0">---</option>
                <option value="2007">2007</option>
                <option value="2008">2008</option>
                <option value="2009">2009</option>
                <option value="2010" selected="selected">2010</option>
                <option value="2011">2011</option>
                <option value="2012">2012</option>
                <option value="2013">2013</option>
                <option value="2014">2014</option>
                <option value="2015">2015</option>
                <option value="2016">2016</option>
            </select>
            """
        ))

    def test_custom_months(self):
        """A custom ``months`` mapping (AP style) replaces the month names."""
        widget = SelectDateWidget(months=MONTHS_AP, years=('2013',))
        self.check_html(widget, 'mydate', '', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
                <option value="0">---</option>
                <option value="1">Jan.</option>
                <option value="2">Feb.</option>
                <option value="3">March</option>
                <option value="4">April</option>
                <option value="5">May</option>
                <option value="6">June</option>
                <option value="7">July</option>
                <option value="8">Aug.</option>
                <option value="9">Sept.</option>
                <option value="10">Oct.</option>
                <option value="11">Nov.</option>
                <option value="12">Dec.</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
                <option value="0">---</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13">13</option>
                <option value="14">14</option>
                <option value="15">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
                <option value="0">---</option>
                <option value="2013">2013</option>
            </select>
            """
        ))

    def test_selectdate_required(self):
        """The widget inherits ``is_required`` from its DateField."""
        class GetNotRequiredDate(Form):
            mydate = DateField(widget=SelectDateWidget, required=False)

        class GetRequiredDate(Form):
            mydate = DateField(widget=SelectDateWidget, required=True)

        self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
        self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)

    def test_selectdate_empty_label(self):
        """``empty_label`` replaces '---' -- one string, or a 3-tuple."""
        w = SelectDateWidget(years=('2014',), empty_label='empty_label')

        # Rendering the default state with empty_label set as a string.
        self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)

        w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))

        # Rendering the default state with empty_label tuple.
        self.assertHTMLEqual(
            w.render('mydate', ''),
            """
            <select name="mydate_month" id="id_mydate_month">
                <option value="0">empty_month</option>
                <option value="1">January</option>
                <option value="2">February</option>
                <option value="3">March</option>
                <option value="4">April</option>
                <option value="5">May</option>
                <option value="6">June</option>
                <option value="7">July</option>
                <option value="8">August</option>
                <option value="9">September</option>
                <option value="10">October</option>
                <option value="11">November</option>
                <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
                <option value="0">empty_day</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13">13</option>
                <option value="14">14</option>
                <option value="15">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
                <option value="0">empty_year</option>
                <option value="2014">2014</option>
            </select>
            """,
        )

        self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
                                 SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))

    @override_settings(USE_L10N=True)
    @translation.override('nl')
    def test_l10n(self):
        """Dutch locale reorders subwidgets (day first) and localizes names."""
        w = SelectDateWidget(
            years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016')
        )
        self.assertEqual(
            w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
            '13-08-2010',
        )

        self.assertHTMLEqual(
            w.render('date', '13-08-2010'),
            """
            <select name="date_day" id="id_date_day">
                <option value="0">---</option>
                <option value="1">1</option>
                <option value="2">2</option>
                <option value="3">3</option>
                <option value="4">4</option>
                <option value="5">5</option>
                <option value="6">6</option>
                <option value="7">7</option>
                <option value="8">8</option>
                <option value="9">9</option>
                <option value="10">10</option>
                <option value="11">11</option>
                <option value="12">12</option>
                <option value="13" selected="selected">13</option>
                <option value="14">14</option>
                <option value="15">15</option>
                <option value="16">16</option>
                <option value="17">17</option>
                <option value="18">18</option>
                <option value="19">19</option>
                <option value="20">20</option>
                <option value="21">21</option>
                <option value="22">22</option>
                <option value="23">23</option>
                <option value="24">24</option>
                <option value="25">25</option>
                <option value="26">26</option>
                <option value="27">27</option>
                <option value="28">28</option>
                <option value="29">29</option>
                <option value="30">30</option>
                <option value="31">31</option>
            </select>
            <select name="date_month" id="id_date_month">
                <option value="0">---</option>
                <option value="1">januari</option>
                <option value="2">februari</option>
                <option value="3">maart</option>
                <option value="4">april</option>
                <option value="5">mei</option>
                <option value="6">juni</option>
                <option value="7">juli</option>
                <option value="8" selected="selected">augustus</option>
                <option value="9">september</option>
                <option value="10">oktober</option>
                <option value="11">november</option>
                <option value="12">december</option>
            </select>
            <select name="date_year" id="id_date_year">
                <option value="0">---</option>
                <option value="2007">2007</option>
                <option value="2008">2008</option>
                <option value="2009">2009</option>
                <option value="2010" selected="selected">2010</option>
                <option value="2011">2011</option>
                <option value="2012">2012</option>
                <option value="2013">2013</option>
                <option value="2014">2014</option>
                <option value="2015">2015</option>
                <option value="2016">2016</option>
            </select>
            """,
        )

        # Even with an invalid date, the widget should reflect the entered value (#17401).
        self.assertEqual(w.render('mydate', '2010-02-30').count('selected="selected"'), 3)

        # Years before 1900 should work.
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(
            w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
            '13-08-1899',
        )
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from future.builtins import zip
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from os.path import join
from collections import Iterable
from copy import deepcopy
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
import qiita_db as qdb
class BaseTestPrepSample(TestCase):
    """Shared fixture for PrepSample tests.

    Builds a PrepSample for sample '1.SKB8.640193' of prep template 1 and
    records the metadata categories the sample is expected to expose.
    """
    def setUp(self):
        # Prep template 1 and one of its known samples from the test DB.
        self.prep_template = \
            qdb.metadata_template.prep_template.PrepTemplate(1)
        self.sample_id = '1.SKB8.640193'
        self.tester = qdb.metadata_template.prep_template.PrepSample(
            self.sample_id, self.prep_template)
        # Every metadata category the test sample should report.
        self.exp_categories = {'center_name', 'center_project_name',
                               'emp_status', 'barcode', 'instrument_model',
                               'library_construction_protocol',
                               'primer', 'target_subfragment',
                               'target_gene', 'run_center', 'run_prefix',
                               'run_date', 'experiment_center',
                               'experiment_design_description',
                               'experiment_title', 'platform', 'samp_size',
                               'sequencing_meth', 'illumina_technology',
                               'sample_center', 'pcr_primers', 'study_center'}
class TestPrepSampleReadOnly(BaseTestPrepSample):
    """Read-only tests for PrepSample against the pre-seeded database.

    Uses the specialized unittest assertions (assertIn, assertIsNone,
    assertIsInstance) instead of assertTrue(<expr>) so that failures
    report the offending value rather than just "False is not true".
    """

    def test_init_unknown_error(self):
        """Init errors if the PrepSample id is not found in the template"""
        with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
            qdb.metadata_template.prep_template.PrepSample(
                'Not_a_Sample', self.prep_template)

    def test_init_wrong_template(self):
        """Raises an error if using a SampleTemplate instead of PrepTemplate"""
        with self.assertRaises(IncompetentQiitaDeveloperError):
            qdb.metadata_template.prep_template.PrepSample(
                '1.SKB8.640193',
                qdb.metadata_template.sample_template.SampleTemplate(1))

    def test_init(self):
        """Init correctly initializes the PrepSample object"""
        sample = qdb.metadata_template.prep_template.PrepSample(
            self.sample_id, self.prep_template)
        # Check that the internal id have been correctly set
        self.assertEqual(sample._id, '1.SKB8.640193')
        # Check that the internal template have been correctly set
        self.assertEqual(sample._md_template, self.prep_template)
        # Check that the internal dynamic table name have been correctly set
        self.assertEqual(sample._dynamic_table, "prep_1")

    def test_eq_true(self):
        """Equality correctly returns true"""
        other = qdb.metadata_template.prep_template.PrepSample(
            self.sample_id, self.prep_template)
        # Deliberately uses == (not assertEqual) to exercise __eq__ directly
        self.assertTrue(self.tester == other)

    def test_eq_false_type(self):
        """Equality returns false if types are not equal"""
        other = qdb.metadata_template.sample_template.Sample(
            self.sample_id,
            qdb.metadata_template.sample_template.SampleTemplate(1))
        self.assertFalse(self.tester == other)

    def test_eq_false_id(self):
        """Equality returns false if ids are different"""
        other = qdb.metadata_template.prep_template.PrepSample(
            '1.SKD8.640184', self.prep_template)
        self.assertFalse(self.tester == other)

    def test_exists_true(self):
        """Exists returns true if the PrepSample exists"""
        self.assertTrue(qdb.metadata_template.prep_template.PrepSample.exists(
            self.sample_id, self.prep_template))

    def test_exists_false(self):
        """Exists returns false if the PrepSample does not exists"""
        self.assertFalse(qdb.metadata_template.prep_template.PrepSample.exists(
            'Not_a_Sample', self.prep_template))

    def test_get_categories(self):
        """Correctly returns the set of category headers"""
        obs = self.tester._get_categories()
        self.assertEqual(obs, self.exp_categories)

    def test_len(self):
        """Len returns the correct number of categories"""
        self.assertEqual(len(self.tester), 22)

    def test_getitem_required(self):
        """Get item returns the correct metadata value from the required table
        """
        self.assertEqual(self.tester['center_name'], 'ANL')
        # assertIsNone reports the actual value on failure, unlike the
        # previous assertTrue(... is None)
        self.assertIsNone(self.tester['center_project_name'])

    def test_getitem_dynamic(self):
        """Get item returns the correct metadata value from the dynamic table
        """
        self.assertEqual(self.tester['pcr_primers'],
                         'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT')
        self.assertEqual(self.tester['barcode'], 'AGCGCTCACATC')

    def test_getitem_id_column(self):
        """Get item returns the correct metadata value from the changed column
        """
        self.assertEqual(self.tester['emp_status'], 'EMP')

    def test_getitem_error(self):
        """Get item raises an error if category does not exists"""
        with self.assertRaises(KeyError):
            self.tester['Not_a_Category']

    def test_iter(self):
        """iter returns an iterator over the category headers"""
        obs = self.tester.__iter__()
        self.assertIsInstance(obs, Iterable)
        self.assertEqual(set(obs), self.exp_categories)

    def test_contains_true(self):
        """contains returns true if the category header exists"""
        # Header lookups are case-insensitive
        self.assertIn('Barcode', self.tester)
        self.assertIn('barcode', self.tester)

    def test_contains_false(self):
        """contains returns false if the category header does not exists"""
        self.assertNotIn('Not_a_Category', self.tester)

    def test_keys(self):
        """keys returns an iterator over the metadata headers"""
        obs = self.tester.keys()
        self.assertIsInstance(obs, Iterable)
        self.assertEqual(set(obs), self.exp_categories)

    def test_values(self):
        """values returns an iterator over the values"""
        obs = self.tester.values()
        self.assertIsInstance(obs, Iterable)
        exp = {'ANL', None, None, None, 'EMP', 'AGCGCTCACATC',
               'This analysis was done as in Caporaso et al 2011 Genome '
               'research. The PCR primers (F515/R806) were developed against '
               'the V4 region of the 16S rRNA (both bacteria and archaea), '
               'which we determined would yield optimal community clustering '
               'with reads of this length using a procedure similar to that of'
               ' ref. 15. [For reference, this primer pair amplifies the '
               'region 533_786 in the Escherichia coli strain 83972 sequence '
               '(greengenes accession no. prokMSA_id:470367).] The reverse PCR'
               ' primer is barcoded with a 12-base error-correcting Golay code'
               ' to facilitate multiplexing of up to 1,500 samples per lane, '
               'and both PCR primers contain sequencer adapter regions.',
               'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL',
               's_G1_L001_sequences', '8/1/12', 'ANL',
               'micro biome of soil and rhizosphere of cannabis plants from '
               'CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq',
               '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL',
               'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME'}
        self.assertEqual(set(obs), exp)

    def test_items(self):
        """items returns an iterator over the (key, value) tuples"""
        obs = self.tester.items()
        self.assertIsInstance(obs, Iterable)
        exp = {('center_name', 'ANL'), ('center_project_name', None),
               ('emp_status', 'EMP'), ('barcode', 'AGCGCTCACATC'),
               ('library_construction_protocol',
                'This analysis was done as in Caporaso et al 2011 Genome '
                'research. The PCR primers (F515/R806) were developed against '
                'the V4 region of the 16S rRNA (both bacteria and archaea), '
                'which we determined would yield optimal community clustering '
                'with reads of this length using a procedure similar to that '
                'of ref. 15. [For reference, this primer pair amplifies the '
                'region 533_786 in the Escherichia coli strain 83972 sequence '
                '(greengenes accession no. prokMSA_id:470367).] The reverse '
                'PCR primer is barcoded with a 12-base error-correcting Golay '
                'code to facilitate multiplexing of up to 1,500 samples per '
                'lane, and both PCR primers contain sequencer adapter '
                'regions.'), ('primer', 'GTGCCAGCMGCCGCGGTAA'),
               ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),
               ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),
               ('run_date', '8/1/12'), ('experiment_center', 'ANL'),
               ('experiment_design_description',
                'micro biome of soil and rhizosphere of cannabis plants '
                'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),
               ('platform', 'Illumina'),
               ('instrument_model', 'Illumina MiSeq'), ('samp_size', '.25,g'),
               ('sequencing_meth', 'Sequencing by synthesis'),
               ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),
               ('pcr_primers',
                'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),
               ('study_center', 'CCME')}
        self.assertEqual(set(obs), exp)

    def test_get(self):
        """get returns the correct sample object"""
        self.assertEqual(self.tester.get('barcode'), 'AGCGCTCACATC')

    def test_get_none(self):
        """get returns none if the sample id is not present"""
        self.assertIsNone(self.tester.get('Not_a_Category'))

    def test_columns_restrictions(self):
        """that it returns SAMPLE_TEMPLATE_COLUMNS"""
        # deepcopy so the update() below does not mutate the module constant
        exp = deepcopy(qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)
        exp.update(
            qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS_TARGET_GENE)
        self.assertEqual(self.prep_template.columns_restrictions, exp)

    def test_can_be_updated(self):
        """test if the template can be updated"""
        # you can't update restricted colums in a pt with data
        self.assertFalse(self.prep_template.can_be_updated({'barcode'}))
        # but you can if not restricted
        self.assertTrue(self.prep_template.can_be_updated({'center_name'}))

    def test_can_be_extended(self):
        """test if the template can be extended"""
        # You can always add columns
        obs_bool, obs_msg = self.prep_template.can_be_extended([], ["NEW_COL"])
        self.assertTrue(obs_bool)
        self.assertEqual(obs_msg, "")
        # You can't add samples if there are preprocessed data generated
        obs_bool, obs_msg = self.prep_template.can_be_extended(
            ["NEW_SAMPLE"], [])
        self.assertFalse(obs_bool)
        exp_msg = ("The artifact attached to the prep template has already "
                   "been processed. No new samples can be added to the prep "
                   "template")
        self.assertEqual(obs_msg, exp_msg)
@qiita_test_checker()
class TestPrepSampleReadWrite(BaseTestPrepSample):
    """Tests the PrepSample class"""

    def test_setitem(self):
        """__setitem__ rejects unknown columns and updates known ones"""
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            self.tester['column that does not exist'] = 0.3
        # A known column starts at its seeded value and can be overwritten.
        self.assertEqual(self.tester['center_name'], 'ANL')
        self.tester['center_name'] = "FOO"
        self.assertEqual(self.tester['center_name'], "FOO")

    def test_delitem(self):
        """delitem raises an error (currently not allowed)"""
        with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):
            del self.tester['pcr_primers']
class BaseTestPrepTemplate(TestCase):
    """Shared fixture builder for the PrepTemplate test cases.

    Subclasses call `_set_up` from their own `setUp` to build the test
    metadata frames and bind the pre-seeded template objects.
    """

    def _set_up(self):
        # Three-sample metadata keyed by bare sample name (no study prefix);
        # the PrepTemplate machinery is expected to add the '1.' prefix.
        self.metadata_dict = {
            'SKB8.640193': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 1',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'GTCCGCAAGTTA',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'instrument_model': 'Illumina MiSeq',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'},
            'SKD8.640184': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 2',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'CGTAGAGCTCTC',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'instrument_model': 'Illumina MiSeq',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'},
            'SKB7.640196': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 3',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'CCTCTGAGAGCT',
                            'run_prefix': "s_G1_L002_sequences",
                            'platform': 'ILLUMINA',
                            'instrument_model': 'Illumina MiSeq',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'}
            }
        self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
                                               orient='index')
        # Same metadata, but already keyed with the study-id ('1.') prefix.
        metadata_prefixed_dict = {
            '1.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 1',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '1.SKD8.640184': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 2',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CGTAGAGCTCTC',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '1.SKB7.640196': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 3',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CCTCTGAGAGCT',
                              'run_prefix': "s_G1_L002_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
            }
        self.metadata_prefixed = pd.DataFrame.from_dict(metadata_prefixed_dict,
                                                        orient='index')
        # Pre-seeded objects from the demo database.
        self.test_study = qdb.study.Study(1)
        self.data_type = "18S"
        self.data_type_id = 2
        self.tester = qdb.metadata_template.prep_template.PrepTemplate(1)
        # All 27 (prefixed) sample ids expected in prep template 1.
        self.exp_sample_ids = {
            '1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195', '1.SKB4.640189',
            '1.SKB5.640181', '1.SKB6.640176', '1.SKB7.640196', '1.SKB8.640193',
            '1.SKB9.640200', '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
            '1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190', '1.SKD7.640191',
            '1.SKD8.640184', '1.SKD9.640182', '1.SKM1.640183', '1.SKM2.640199',
            '1.SKM3.640197', '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
            '1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192'}
        # Paths registered here are deleted in tearDown.
        self._clean_up_files = []

    def tearDown(self):
        # Remove any temporary files a test registered for cleanup.
        for f in self._clean_up_files:
            remove(f)
class TestPrepTemplateReadOnly(BaseTestPrepTemplate):
    """Read-only tests for PrepTemplate against prep template 1.

    Fixes three assertions that previously used assertTrue(value, expected):
    that form only checks truthiness and silently uses the second argument
    as the failure *message*, so wrong values could never fail the test.
    """

    def setUp(self):
        self._set_up()

    def test_study_id(self):
        """Ensure that the correct study ID is returned"""
        self.assertEqual(self.tester.study_id, 1)

    def test_init_unknown_error(self):
        """Init raises an error if the id is not known"""
        with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
            qdb.metadata_template.prep_template.PrepTemplate(3)

    def test_init(self):
        """Init successfully instantiates the object"""
        st = qdb.metadata_template.prep_template.PrepTemplate(1)
        # BUG FIX: was assertTrue(st.id, 1) -- truthiness check only
        self.assertEqual(st.id, 1)

    def test_table_name(self):
        """Table name return the correct string"""
        obs = qdb.metadata_template.prep_template.PrepTemplate._table_name(1)
        self.assertEqual(obs, "prep_1")

    def test_exists_true(self):
        """Exists returns true when the PrepTemplate already exists"""
        self.assertTrue(
            qdb.metadata_template.prep_template.PrepTemplate.exists(1))

    def test_exists_false(self):
        """Exists returns false when the PrepTemplate does not exists"""
        self.assertFalse(
            qdb.metadata_template.prep_template.PrepTemplate.exists(3))

    def test_get_sample_ids(self):
        """get_sample_ids returns the correct set of sample ids"""
        obs = self.tester._get_sample_ids()
        self.assertEqual(obs, self.exp_sample_ids)

    def test_len(self):
        """Len returns the correct number of sample ids"""
        self.assertEqual(len(self.tester), 27)

    def test_getitem(self):
        """Get item returns the correct sample object"""
        obs = self.tester['1.SKM7.640188']
        exp = qdb.metadata_template.prep_template.PrepSample(
            '1.SKM7.640188', self.tester)
        self.assertEqual(obs, exp)

    def test_getitem_error(self):
        """Get item raises an error if key does not exists"""
        with self.assertRaises(KeyError):
            self.tester['Not_a_Sample']

    def test_iter(self):
        """iter returns an iterator over the sample ids"""
        obs = self.tester.__iter__()
        self.assertIsInstance(obs, Iterable)
        self.assertEqual(set(obs), self.exp_sample_ids)

    def test_contains_true(self):
        """contains returns true if the sample id exists"""
        self.assertIn('1.SKM7.640188', self.tester)

    def test_contains_false(self):
        """contains returns false if the sample id does not exists"""
        self.assertNotIn('Not_a_Sample', self.tester)

    def test_keys(self):
        """keys returns an iterator over the sample ids"""
        obs = self.tester.keys()
        self.assertIsInstance(obs, Iterable)
        self.assertEqual(set(obs), self.exp_sample_ids)

    def test_values(self):
        """values returns an iterator over the values"""
        obs = self.tester.values()
        self.assertIsInstance(obs, Iterable)
        # One PrepSample per expected sample id; built by comprehension
        # instead of the former 54-line literal.
        exp = {qdb.metadata_template.prep_template.PrepSample(sid,
                                                              self.tester)
               for sid in self.exp_sample_ids}
        # Creating a list and looping over it since unittest does not call
        # the __eq__ function on the objects
        for o, e in zip(sorted(list(obs), key=lambda x: x.id),
                        sorted(exp, key=lambda x: x.id)):
            self.assertEqual(o, e)

    def test_items(self):
        """items returns an iterator over the (key, value) tuples"""
        obs = self.tester.items()
        self.assertIsInstance(obs, Iterable)
        # (id, PrepSample) pairs in ascending id order, matching the sort
        # applied to the observed items below.
        exp = [(sid,
                qdb.metadata_template.prep_template.PrepSample(sid,
                                                               self.tester))
               for sid in sorted(self.exp_sample_ids)]
        # Creating a list and looping over it since unittest does not call
        # the __eq__ function on the objects
        for o, e in zip(sorted(list(obs)), sorted(exp)):
            self.assertEqual(o, e)

    def test_get(self):
        """get returns the correct PrepSample object"""
        obs = self.tester.get('1.SKM7.640188')
        exp = qdb.metadata_template.prep_template.PrepSample(
            '1.SKM7.640188', self.tester)
        self.assertEqual(obs, exp)

    def test_get_none(self):
        """get returns none if the sample id is not present"""
        self.assertIsNone(self.tester.get('Not_a_Sample'))

    def test_data_type(self):
        """data_type returns the string with the data_type"""
        # BUG FIX: was assertTrue(..., "18S") -- truthiness check only
        self.assertEqual(self.tester.data_type(), "18S")

    def test_data_type_id(self):
        """data_type returns the int with the data_type_id"""
        # BUG FIX: was assertTrue(..., 2) -- truthiness check only
        self.assertEqual(self.tester.data_type(ret_id=True), 2)

    def test_investigation_type(self):
        """investigation_type works correctly"""
        self.assertEqual(self.tester.investigation_type, "Metagenomics")

    def test_to_dataframe(self):
        """to_dataframe exposes every sample and metadata column"""
        obs = self.tester.to_dataframe()
        # We don't test the specific values as this would blow up the size
        # of this file as the amount of lines would go to ~1000
        # 27 samples
        self.assertEqual(len(obs), 27)
        # The index is exactly the expected (prefixed) sample-id set.
        self.assertEqual(set(obs.index), self.exp_sample_ids)
        self.assertEqual(set(obs.columns), {
            'center_name', 'center_project_name',
            'emp_status', 'barcode',
            'library_construction_protocol', 'primer',
            'target_subfragment', 'target_gene', 'run_center',
            'run_prefix', 'run_date', 'experiment_center',
            'experiment_design_description', 'experiment_title', 'platform',
            'instrument_model', 'samp_size', 'sequencing_meth',
            'illumina_technology', 'sample_center', 'pcr_primers',
            'study_center'})

    def test_clean_validate_template_error_bad_chars(self):
        """Raises an error if there are invalid characters in the sample names
        """
        self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
        PT = qdb.metadata_template.prep_template.PrepTemplate
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            PT._clean_validate_template(
                self.metadata, 2,
                qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_error_duplicate_cols(self):
        """Raises an error if there are duplicated columns in the template"""
        # 'STR_COLUMN' normalizes to the existing 'str_column' -> duplicate
        self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],
                                                index=self.metadata.index)
        PT = qdb.metadata_template.prep_template.PrepTemplate
        with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
            PT._clean_validate_template(
                self.metadata, 2,
                qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_error_duplicate_samples(self):
        """Raises an error if there are duplicated samples in the templates"""
        self.metadata.index = ['sample.1', 'sample.1', 'sample.3']
        PT = qdb.metadata_template.prep_template.PrepTemplate
        with self.assertRaises(qdb.exceptions.QiitaDBDuplicateSamplesError):
            PT._clean_validate_template(
                self.metadata, 2,
                qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_warning_missing(self):
        """Warns (does not error) if the template misses a required column"""
        metadata_dict = {
            'SKB8.640193': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
                            'barcodesequence': 'GTCCGCAAGTTA',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'instrument_model': 'Illumina MiSeq',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'}
        }
        metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
        PT = qdb.metadata_template.prep_template.PrepTemplate
        # assert_warns both checks the warning and returns the cleaned frame
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, PT._clean_validate_template,
            metadata, 2, qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)
        metadata_dict = {
            '2.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
                              'barcodesequence': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
        }
        exp = pd.DataFrame.from_dict(metadata_dict, orient='index')
        # Sort rows and columns of both frames before comparing
        obs.sort_index(axis=0, inplace=True)
        obs.sort_index(axis=1, inplace=True)
        exp.sort_index(axis=0, inplace=True)
        exp.sort_index(axis=1, inplace=True)
        assert_frame_equal(obs, exp)

    def test_clean_validate_template(self):
        """_clean_validate_template prefixes ids and normalizes headers"""
        PT = qdb.metadata_template.prep_template.PrepTemplate
        obs = PT._clean_validate_template(
            self.metadata, 2,
            qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS)
        metadata_dict = {
            '2.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 1',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '2.SKD8.640184': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 2',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CGTAGAGCTCTC',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '2.SKB7.640196': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 3',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CCTCTGAGAGCT',
                              'run_prefix': "s_G1_L002_sequences",
                              'platform': 'ILLUMINA',
                              'instrument_model': 'Illumina MiSeq',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
        }
        exp = pd.DataFrame.from_dict(metadata_dict, orient='index')
        obs.sort_index(axis=0, inplace=True)
        obs.sort_index(axis=1, inplace=True)
        exp.sort_index(axis=0, inplace=True)
        exp.sort_index(axis=1, inplace=True)
        assert_frame_equal(obs, exp)

    def test_get_category(self):
        """get_category maps every sample id to its primer value"""
        pt = qdb.metadata_template.prep_template.PrepTemplate(1)
        obs = pt.get_category('primer')
        # All 27 samples share the same primer, so build the expected
        # mapping from the shared id set instead of a 27-line literal.
        exp = {sid: 'GTGCCAGCMGCCGCGGTAA' for sid in self.exp_sample_ids}
        self.assertEqual(obs, exp)

    def test_get_category_no_exists(self):
        """get_category raises for a column that does not exist"""
        pt = qdb.metadata_template.prep_template.PrepTemplate(1)
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            pt.get_category('DOESNOTEXIST')
@qiita_test_checker()
class TestPrepTemplateReadWrite(BaseTestPrepTemplate):
"""Tests the PrepTemplate class"""
def setUp(self):
self._set_up()
self._clean_up_files = []
def test_create_duplicate_header(self):
"""Create raises an error when duplicate headers are present"""
self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],
index=self.metadata.index)
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
def test_create_bad_sample_names(self):
# set a horrible list of sample names
self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
def test_create_unknown_sample_names(self):
# set two real and one fake sample name
self.metadata_dict['NOTREAL'] = self.metadata_dict['SKB7.640196']
del self.metadata_dict['SKB7.640196']
self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
orient='index')
# Test error raised and correct error given
with self.assertRaises(qdb.exceptions.QiitaDBExecutionError) as err:
qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
self.assertEqual(
str(err.exception),
'Samples found in prep template but not sample template: 1.NOTREAL'
)
def test_create_shorter_prep_template(self):
# remove one sample so not all samples in the prep template
del self.metadata_dict['SKB7.640196']
self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
orient='index')
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
obs = self.conn_handler.execute_fetchall(
"SELECT sample_id FROM qiita.prep_%d" % pt.id)
exp = [['1.SKB8.640193'], ['1.SKD8.640184']]
self.assertEqual(obs, exp)
def test_create_error_cleanup(self):
"""Create does not modify the database if an error happens"""
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'group': 2,
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'},
'SKD8.640184': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'group': 1,
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'CGTAGAGCTCTC',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'},
'SKB7.640196': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'group': 'Value for sample 3',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'CCTCTGAGAGCT',
'run_prefix': "s_G1_L002_sequences",
'platform': 'ILLUMINA',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}
}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
exp_id = qdb.util.get_count("qiita.prep_template") + 1
with self.assertRaises(ValueError):
qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, self.test_study, self.data_type)
sql = """SELECT EXISTS(
SELECT * FROM qiita.prep_template
WHERE prep_template_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (exp_id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.prep_template_sample
WHERE prep_template_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (exp_id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.prep_columns
WHERE prep_template_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (exp_id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.study_prep_template
WHERE prep_template_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (exp_id,))[0])
self.assertFalse(qdb.util.exists_table("prep_%d" % exp_id))
    def _common_creation_checks(self, new_id, pt, fp_count):
        """Assert the shared post-conditions of a successful create call.

        Parameters
        ----------
        new_id : int
            Id the newly created prep template is expected to receive.
        pt : PrepTemplate
            The template returned by the create call under test.
        fp_count : int
            Row count of qiita.filepath taken *before* the create call;
            create is expected to register exactly two new filepaths.
        """
        # The returned object has the correct id
        self.assertEqual(pt.id, new_id)
        self.assertEqual(pt.data_type(), self.data_type)
        self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)
        self.assertEqual(pt.artifact, None)
        self.assertEqual(pt.investigation_type, None)
        self.assertEqual(pt.study_id, self.test_study.id)
        self.assertEqual(pt.status, "sandbox")
        # Sample ids are prefixed with the study id on insertion.
        exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,
                          '%s.SKD8.640184' % self.test_study.id,
                          '%s.SKB7.640196' % self.test_study.id}
        self.assertEqual(pt._get_sample_ids(), exp_sample_ids)
        self.assertEqual(len(pt), 3)
        # NOTE(review): assertItemsEqual is Python 2 unittest; the Python 3
        # equivalent is assertCountEqual.
        exp_categories = {'str_column', 'ebi_submission_accession',
                          'run_prefix', 'barcode', 'primer', 'platform',
                          'instrument_model', 'experiment_design_description',
                          'library_construction_protocol', 'center_name',
                          'center_project_name', 'emp_status'}
        self.assertItemsEqual(pt.categories(), exp_categories)
        # Full per-sample metadata as stored for the new template.
        exp_dict = {
            '%s.SKB7.640196' % self.test_study.id: {
                'barcode': 'CCTCTGAGAGCT',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L002_sequences',
                'str_column': 'Value for sample 3',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'},
            '%s.SKB8.640193' % self.test_study.id: {
                'barcode': 'GTCCGCAAGTTA',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 1',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'},
            '%s.SKD8.640184' % self.test_study.id: {
                'barcode': 'CGTAGAGCTCTC',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 2',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'}
            }
        for s_id in exp_sample_ids:
            self.assertEqual(pt[s_id]._to_dict(), exp_dict[s_id])
        # prep and qiime files have been created
        filepaths = pt.get_filepaths()
        self.assertEqual(len(filepaths), 2)
        self.assertEqual(filepaths[0][0], fp_count + 2)
        self.assertEqual(filepaths[1][0], fp_count + 1)
def test_create(self):
"""Creates a new PrepTemplate"""
fp_count = qdb.util.get_count('qiita.filepath')
new_id = qdb.util.get_count('qiita.prep_template') + 1
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
self._common_creation_checks(new_id, pt, fp_count)
def test_create_already_prefixed_samples(self):
"""Creates a new PrepTemplate"""
fp_count = qdb.util.get_count('qiita.filepath')
new_id = qdb.util.get_count('qiita.prep_template') + 1
pt = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.prep_template.PrepTemplate.create,
self.metadata_prefixed, self.test_study, self.data_type)
self._common_creation_checks(new_id, pt, fp_count)
def test_generate_files(self):
fp_count = qdb.util.get_count("qiita.filepath")
self.tester.generate_files()
obs = qdb.util.get_count("qiita.filepath")
# We just make sure that the count has been increased by 2, since
# the contents of the files have been tested elsewhere.
self.assertEqual(obs, fp_count + 2)
def test_create_qiime_mapping_file(self):
pt = qdb.metadata_template.prep_template.PrepTemplate(1)
# creating prep template file
_id, fp = qdb.util.get_mountpoint('templates')[0]
obs_fp = pt.create_qiime_mapping_file()
exp_fp = join(fp, '1_prep_1_qiime_19700101-000000.txt')
obs = pd.read_csv(obs_fp, sep='\t', infer_datetime_format=True,
parse_dates=True, index_col=False, comment='\t')
exp = pd.read_csv(
exp_fp, sep='\t', infer_datetime_format=True,
parse_dates=True, index_col=False, comment='\t',
na_values=qdb.metadata_template.constants.NA_VALUES,
true_values=qdb.metadata_template.constants.TRUE_VALUES,
false_values=qdb.metadata_template.constants.FALSE_VALUES)
assert_frame_equal(obs, exp)
def test_create_data_type_id(self):
"""Creates a new PrepTemplate passing the data_type_id"""
fp_count = qdb.util.get_count('qiita.filepath')
new_id = qdb.util.get_count('qiita.prep_template') + 1
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
self._common_creation_checks(new_id, pt, fp_count)
    def test_create_warning(self):
        """Warns if a required column is missing for a given functionality.

        Creating without the 'barcode' column must still succeed (emitting a
        QiitaDBWarning) and produce a template lacking that category.
        """
        fp_count = qdb.util.get_count("qiita.filepath")
        new_id = qdb.util.get_count('qiita.prep_template') + 1
        # Drop a required-for-EBI column before creating.
        del self.metadata['barcode']
        pt = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.prep_template.PrepTemplate.create,
            self.metadata, self.test_study, self.data_type)
        self.assertEqual(pt.id, new_id)
        self.assertEqual(pt.data_type(), self.data_type)
        self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)
        self.assertEqual(pt.artifact, None)
        self.assertEqual(pt.investigation_type, None)
        self.assertEqual(pt.study_id, self.test_study.id)
        self.assertEqual(pt.status, 'sandbox')
        exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,
                          '%s.SKD8.640184' % self.test_study.id,
                          '%s.SKB7.640196' % self.test_study.id}
        self.assertEqual(pt._get_sample_ids(), exp_sample_ids)
        self.assertEqual(len(pt), 3)
        # 'barcode' must be absent from the stored categories.
        exp_categories = {'str_column', 'ebi_submission_accession',
                          'run_prefix', 'primer', 'platform',
                          'instrument_model', 'experiment_design_description',
                          'library_construction_protocol', 'center_name',
                          'center_project_name', 'emp_status'}
        self.assertItemsEqual(pt.categories(), exp_categories)
        # Per-sample metadata, without the dropped 'barcode' values.
        exp_dict = {
            '%s.SKB7.640196' % self.test_study.id: {
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L002_sequences',
                'str_column': 'Value for sample 3',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'},
            '%s.SKB8.640193' % self.test_study.id: {
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 1',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'},
            '%s.SKD8.640184' % self.test_study.id: {
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 2',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP'}
            }
        for s_id in exp_sample_ids:
            self.assertEqual(pt[s_id]._to_dict(), exp_dict[s_id])
        # prep and qiime files have been created
        filepaths = pt.get_filepaths()
        self.assertEqual(len(filepaths), 2)
        self.assertEqual(filepaths[0][0], fp_count + 2)
        self.assertEqual(filepaths[1][0], fp_count + 1)
def test_create_investigation_type_error(self):
"""Create raises an error if the investigation_type does not exists"""
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id,
'Not a term')
def test_delete_error(self):
"""Try to delete a prep template that already has preprocessed data"""
with self.assertRaises(qdb.exceptions.QiitaDBExecutionError):
qdb.metadata_template.prep_template.PrepTemplate.delete(1)
def test_delete_unkonwn_id_error(self):
"""Try to delete a non existent prep template"""
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.metadata_template.prep_template.PrepTemplate.delete(5)
def test_delete_error_raw_data(self):
"""Try to delete a prep template with a raw data attached to id"""
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
pt.artifact = qdb.artifact.Artifact(1)
with self.assertRaises(qdb.exceptions.QiitaDBExecutionError):
qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)
def test_delete(self):
"""Deletes prep template 2"""
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.prep_template WHERE prep_template_id=%s",
(pt.id,))
exp = []
self.assertEqual(obs, exp)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_prep_template "
"WHERE prep_template_id=%s", (pt.id,))
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.prep_template_sample "
"WHERE prep_template_id=%s", (pt.id,))
exp = []
self.assertEqual(obs, exp)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.prep_columns WHERE prep_template_id=%s",
(pt.id,))
exp = []
self.assertEqual(obs, exp)
with self.assertRaises(ValueError):
self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.prep_%d" % pt.id)
def test_setitem(self):
"""setitem raises an error (currently not allowed)"""
with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):
self.tester['1.SKM7.640188'] = \
qdb.metadata_template.prep_template.PrepSample('1.SKM7.640188',
self.tester)
def test_delitem(self):
"""delitem raises an error (currently not allowed)"""
with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):
del self.tester['1.SKM7.640188']
def test_to_file(self):
"""to file writes a tab delimited file with all the metadata"""
fd, fp = mkstemp()
close(fd)
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
pt.to_file(fp)
self._clean_up_files.append(fp)
with open(fp, 'U') as f:
obs = f.read()
self.assertEqual(obs, EXP_PREP_TEMPLATE)
def test_investigation_type_setter(self):
"""Able to update the investigation type"""
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
self.assertEqual(pt.investigation_type, None)
pt.investigation_type = "Other"
self.assertEqual(pt.investigation_type, 'Other')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
pt.investigation_type = "should fail"
def test_investigation_type_instance_setter(self):
pt = qdb.metadata_template.prep_template.PrepTemplate(1)
pt.investigation_type = 'RNASeq'
self.assertEqual(pt.investigation_type, 'RNASeq')
def test_status(self):
pt = qdb.metadata_template.prep_template.PrepTemplate(1)
self.assertEqual(pt.status, 'private')
# Check that changing the status of the processed data, the status
# of the prep template changes
a = qdb.artifact.Artifact(1)
a.visibility = 'public'
self.assertEqual(pt.status, 'public')
# New prep templates have the status to sandbox because there is no
# processed data associated with them
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
self.assertEqual(pt.status, 'sandbox')
def test_update_category(self):
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
self.tester.update_category('barcode', {"foo": "bar"})
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
self.tester.update_category('missing column',
{'1.SKB7.640196': 'bar'})
neg_test = self.tester['1.SKB7.640196']['barcode']
mapping = {'1.SKB8.640193': 'AAAAAAAAAAAA',
'1.SKD8.640184': 'CCCCCCCCCCCC'}
self.tester.update_category('barcode', mapping)
self.assertEqual(self.tester['1.SKB7.640196']['barcode'],
neg_test)
self.assertEqual(self.tester['1.SKB8.640193']['barcode'],
'AAAAAAAAAAAA')
self.assertEqual(self.tester['1.SKD8.640184']['barcode'],
'CCCCCCCCCCCC')
neg_test = self.tester['1.SKB7.640196']['center_name']
mapping = {'1.SKB8.640193': 'FOO',
'1.SKD8.640184': 'BAR'}
self.tester.update_category('center_name', mapping)
self.assertEqual(self.tester['1.SKB7.640196']['center_name'], neg_test)
self.assertEqual(self.tester['1.SKB8.640193']['center_name'], 'FOO')
self.assertEqual(self.tester['1.SKD8.640184']['center_name'], 'BAR')
def test_qiime_map_fp(self):
pt = qdb.metadata_template.prep_template.PrepTemplate(1)
exp = join(qdb.util.get_mountpoint('templates')[0][1],
'1_prep_1_qiime_19700101-000000.txt')
self.assertEqual(pt.qiime_map_fp, exp)
def test_check_restrictions(self):
obs = self.tester.check_restrictions(
[qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS['EBI']])
self.assertEqual(obs, set())
del self.metadata['primer']
pt = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.prep_template.PrepTemplate.create,
self.metadata, self.test_study, self.data_type)
obs = pt.check_restrictions(
[qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS['EBI'],
qdb.metadata_template.constants.PREP_TEMPLATE_COLUMNS_TARGET_GENE[
'demultiplex']])
self.assertEqual(obs, {'primer'})
def test_artifact(self):
"""Returns the artifact associated with the prep template"""
self.assertEqual(self.tester.artifact, qdb.artifact.Artifact(1))
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
self.assertEqual(pt.artifact, None)
def test_artifact_setter_error(self):
a = qdb.artifact.Artifact(1)
with self.assertRaises(qdb.exceptions.QiitaDBError):
self.tester.artifact = a
def test_artifact_setter(self):
a = qdb.artifact.Artifact(1)
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
self.assertEqual(pt.artifact, None)
pt.artifact = a
self.assertEqual(pt.artifact, a)
def test_can_be_updated_on_new(self):
"""test if the template can be updated"""
# you can update a newly created pt
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
self.assertTrue(pt.can_be_updated({'barcode'}))
def test_extend_add_samples(self):
"""extend correctly works adding new samples"""
md_2_samples = self.metadata.loc[('SKB8.640193', 'SKD8.640184'), :]
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
md_2_samples, self.test_study, self.data_type)
npt.assert_warns(
qdb.exceptions.QiitaDBWarning, pt.extend, self.metadata)
exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,
'%s.SKD8.640184' % self.test_study.id,
'%s.SKB7.640196' % self.test_study.id}
self.assertEqual(pt._get_sample_ids(), exp_sample_ids)
def test_extend_add_samples_error(self):
"""extend fails adding samples to an already preprocessed template"""
df = pd.DataFrame.from_dict(
{'new_sample': {'barcode': 'CCTCTGAGAGCT'}},
orient='index')
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.metadata_template.prep_template.PrepTemplate(1).extend(df)
    def test_extend_add_cols(self):
        """extend correctly adds a new column.

        Adds 'new_col' to the metadata, extends the template, and verifies
        the dynamic prep_<id> table contains the column for every sample.
        """
        pt = qdb.metadata_template.prep_template.PrepTemplate.create(
            self.metadata, self.test_study, self.data_type)
        self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],
                                             index=self.metadata.index)
        npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, pt.extend, self.metadata)
        # Read the dynamic table back and compare row by row.
        sql = "SELECT * FROM qiita.prep_{0}".format(pt.id)
        obs = [dict(o) for o in self.conn_handler.execute_fetchall(sql)]
        exp = [{'sample_id': '1.SKB7.640196',
                'barcode': 'CCTCTGAGAGCT',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L002_sequences',
                'str_column': 'Value for sample 3',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val1'},
               {'sample_id': '1.SKB8.640193',
                'barcode': 'GTCCGCAAGTTA',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 1',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val2'},
               {'sample_id': '1.SKD8.640184',
                'barcode': 'CGTAGAGCTCTC',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 2',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val3'}]
        self.assertItemsEqual(obs, exp)
    def test_extend_update(self):
        """extend + update applies both new columns and changed values."""
        pt = qdb.metadata_template.prep_template.PrepTemplate.create(
            self.metadata, self.test_study, self.data_type)
        self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],
                                             index=self.metadata.index)
        # NOTE(review): chained assignment -- pandas may operate on a copy.
        # Consider .loc['SKB7.640196', 'str_column']; confirm before changing.
        self.metadata['str_column']['SKB7.640196'] = 'NEW VAL'
        npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, pt.extend, self.metadata)
        pt.update(self.metadata)
        # Read the dynamic table back and compare row by row.
        sql = "SELECT * FROM qiita.prep_{0}".format(pt.id)
        obs = [dict(o) for o in self.conn_handler.execute_fetchall(sql)]
        exp = [{'sample_id': '1.SKB7.640196',
                'barcode': 'CCTCTGAGAGCT',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L002_sequences',
                'str_column': 'NEW VAL',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val1'},
               {'sample_id': '1.SKB8.640193',
                'barcode': 'GTCCGCAAGTTA',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 1',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val2'},
               {'sample_id': '1.SKD8.640184',
                'barcode': 'CGTAGAGCTCTC',
                'ebi_submission_accession': None,
                'experiment_design_description': 'BBBB',
                'library_construction_protocol': 'AAAA',
                'primer': 'GTGCCAGCMGCCGCGGTAA',
                'platform': 'ILLUMINA',
                'instrument_model': 'Illumina MiSeq',
                'run_prefix': 's_G1_L001_sequences',
                'str_column': 'Value for sample 2',
                'center_name': 'ANL',
                'center_project_name': 'Test Project',
                'emp_status': 'EMP',
                'new_col': 'val3'}]
        self.assertItemsEqual(obs, exp)
    def test_ebi_experiment_accessions(self):
        """Stored accessions are returned; new templates report None."""
        obs = self.tester.ebi_experiment_accessions
        # The fixture template has one ERX accession per sample.
        exp = {'1.SKB8.640193': 'ERX0000000',
               '1.SKD8.640184': 'ERX0000001',
               '1.SKB7.640196': 'ERX0000002',
               '1.SKM9.640192': 'ERX0000003',
               '1.SKM4.640180': 'ERX0000004',
               '1.SKM5.640177': 'ERX0000005',
               '1.SKB5.640181': 'ERX0000006',
               '1.SKD6.640190': 'ERX0000007',
               '1.SKB2.640194': 'ERX0000008',
               '1.SKD2.640178': 'ERX0000009',
               '1.SKM7.640188': 'ERX0000010',
               '1.SKB1.640202': 'ERX0000011',
               '1.SKD1.640179': 'ERX0000012',
               '1.SKD3.640198': 'ERX0000013',
               '1.SKM8.640201': 'ERX0000014',
               '1.SKM2.640199': 'ERX0000015',
               '1.SKB9.640200': 'ERX0000016',
               '1.SKD5.640186': 'ERX0000017',
               '1.SKM3.640197': 'ERX0000018',
               '1.SKD9.640182': 'ERX0000019',
               '1.SKB4.640189': 'ERX0000020',
               '1.SKD7.640191': 'ERX0000021',
               '1.SKM6.640187': 'ERX0000022',
               '1.SKD4.640185': 'ERX0000023',
               '1.SKB3.640195': 'ERX0000024',
               '1.SKB6.640176': 'ERX0000025',
               '1.SKM1.640183': 'ERX0000026'}
        self.assertEqual(obs, exp)
        # A freshly created template has no accessions yet.
        obs = qdb.metadata_template.prep_template.PrepTemplate.create(
            self.metadata, self.test_study,
            self.data_type).ebi_experiment_accessions
        exp = {'%s.SKB8.640193' % self.test_study.id: None,
               '%s.SKD8.640184' % self.test_study.id: None,
               '%s.SKB7.640196' % self.test_study.id: None}
        self.assertEqual(obs, exp)
def test_ebi_experiment_accessions_setter(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
self.tester.ebi_experiment_accessions = {
'1.SKB8.640193': 'ERX1000000', '1.SKD8.640184': 'ERX1000001'}
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
exp_acc = {'%s.SKB8.640193' % self.test_study.id: 'ERX0000126',
'%s.SKD8.640184' % self.test_study.id: 'ERX0000127'}
pt.ebi_experiment_accessions = exp_acc
exp_acc['%s.SKB7.640196' % self.test_study.id] = None
self.assertEqual(pt.ebi_experiment_accessions, exp_acc)
exp_acc['%s.SKB7.640196' % self.test_study.id] = 'ERX0000128'
pt.ebi_experiment_accessions = exp_acc
self.assertEqual(pt.ebi_experiment_accessions, exp_acc)
# We need to wrap the assignment in a function so we can use
# npt.assert_warns
def f():
pt.ebi_experiment_accessions = exp_acc
npt.assert_warns(qdb.exceptions.QiitaDBWarning, f)
def test_ebi_experiment_accessions_setter_common_samples(self):
# If 2 different prep templates have common samples, setting the
# ebi_experiment_accession should affect only the prep template
# that it was called to, not both prep templates
pt1 = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
pt2 = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
exp_acc1 = {'%s.SKB8.640193' % self.test_study.id: 'ERX0000126',
'%s.SKD8.640184' % self.test_study.id: 'ERX0000127'}
pt1.ebi_experiment_accessions = exp_acc1
exp_acc1['%s.SKB7.640196' % self.test_study.id] = None
self.assertEqual(pt1.ebi_experiment_accessions, exp_acc1)
exp_acc2 = {k: None for k in exp_acc1.keys()}
self.assertEqual(pt2.ebi_experiment_accessions, exp_acc2)
def test_is_submitted_to_ebi(self):
self.assertTrue(self.tester.is_submitted_to_ebi)
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type)
self.assertFalse(pt.is_submitted_to_ebi)
# Expected tab-separated contents of the file written by PrepTemplate.to_file
# for the three-sample test metadata (used by test_to_file).
EXP_PREP_TEMPLATE = (
    'sample_name\tbarcode\tcenter_name\tcenter_project_name\t'
    'ebi_submission_accession\temp_status\texperiment_design_description\t'
    'instrument_model\tlibrary_construction_protocol\tplatform\tprimer\t'
    'run_prefix\tstr_column\n'
    '1.SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\t\tEMP\tBBBB\t'
    'Illumina MiSeq\tAAAA\tILLUMINA\tGTGCCAGCMGCCGCGGTAA\t'
    's_G1_L002_sequences\tValue for sample 3\n'
    '1.SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\t\tEMP\tBBBB\t'
    'Illumina MiSeq\tAAAA\tILLUMINA\tGTGCCAGCMGCCGCGGTAA\t'
    's_G1_L001_sequences\tValue for sample 1\n'
    '1.SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\t\tEMP\tBBBB\t'
    'Illumina MiSeq\tAAAA\tILLUMINA\tGTGCCAGCMGCCGCGGTAA\t'
    's_G1_L001_sequences\tValue for sample 2\n')
# Run this module's test suite when executed directly.
if __name__ == '__main__':
    main()
|
|
import gspread
# import pdb
import datetime
import os
from oauth2client.service_account import ServiceAccountCredentials
# Target spreadsheet URL comes from the environment; os.environ[...] raises
# KeyError at import time if AE_gsheet_url is unset.
URL = os.environ['AE_gsheet_url']
SHEET_NAME = 'Sheet2'
# OAuth scope for the (legacy) spreadsheets feed.
scope = ['https://spreadsheets.google.com/feeds']
# NOTE(review): the service-account key path is hard-coded and loaded at
# import time -- confirm the json file ships alongside this script.
credentials = ServiceAccountCredentials.from_json_keyfile_name('CAMSCAS-6ac566f0e517.json', scope)
def get_sheet_dict(sheet_url, worksheet_name):
    """Read the worksheet and return its data rows as heading->value dicts.

    Row 0 supplies the headings; data rows are consumed starting at index 2
    (index 1 is skipped -- presumably a second header/blank row in this
    sheet's layout).
    """
    client = gspread.authorize(credentials)
    workbook = client.open_by_url(sheet_url)
    sheet = workbook.worksheet(worksheet_name)
    all_rows = sheet.get_all_values()
    heading = all_rows[0]
    return [{heading[col]: row[col] for col in range(len(row))}
            for row in all_rows[2:]]
def clear_google_sheet(sheet_url, worksheet_name):
    """Drop every data row by shrinking the sheet to its heading row."""
    client = gspread.authorize(credentials)
    workbook = client.open_by_url(sheet_url)
    workbook.worksheet(worksheet_name).resize(rows=1)
# Add a record to the end of the google sheet url supplied.
# No need to pass the index column; it is computed here.
def add_record_from_dict(sheet_url, worksheet_name, dict_rec):
    """Append one record (a heading->value dict) to the worksheet.

    The '#' index column is computed automatically from the current row
    count; callers must not supply it.

    Raises
    ------
    ValueError
        If the dict keys do not match the sheet's heading row.
        (ValueError subclasses Exception, so callers catching the old
        generic Exception still work.)
    """
    gc = gspread.authorize(credentials)
    wkb = gc.open_by_url(sheet_url)
    wks = wkb.worksheet(worksheet_name)
    values_list = [i for i in wks.row_values(1) if i]
    # Work on a copy so the caller's dict is not mutated (bug fix: the
    # original wrote the '#' key back into the caller's dict).
    record = dict(dict_rec)
    # index = row_count + 1 (next item) - 1 (heading row)
    record["#"] = wks.row_count
    if set(record.keys()) != set(values_list):
        raise ValueError("Dictionary is not well formed for the sheet.")
    wks.append_row([record[col] for col in values_list])
# index is handled in insertion logic
def create_order_dict(order_id, title, tracking_id, carrier, status, order_dt, recv_dt, price, updated_on):
    """Build one sheet-row dict for a single order item."""
    keys = ("Order ID", "Title", "Tracking ID", "Tracking Status", "Status",
            "Order Date", "Days Left", "Price", "Updated On")
    values = (order_id, title, tracking_id, carrier, status, order_dt,
              recv_dt, price, updated_on)
    return dict(zip(keys, values))
def batch_update_gsheet(sheet_url, worksheet_name, list_rec, ts):
    """Rewrite the worksheet data rows (range A2:J...) from a list of dicts.

    Parameters
    ----------
    sheet_url : str
        Spreadsheet URL, opened with the module-level credentials.
    worksheet_name : str
        Worksheet (tab) to update.
    list_rec : list of dict
        Rows shaped like the output of create_order_dict.
    ts : str
        NOTE(review): currently unused -- the "Updated On" column is stamped
        per cell with datetime.now(); confirm whether ts was meant instead.
    """
    gc = gspread.authorize(credentials)
    wkb = gc.open_by_url(sheet_url)
    wks = wkb.worksheet(worksheet_name)
    row_total = len(list_rec)
    print("number of rows:" + str(wks.row_count))
    # Keep ten spare rows below the data; the sheet has 10 columns (A..J).
    wks.resize(row_total + 10, 10)
    print("number of rows after resize:" + str(wks.row_count))
    print('A2:J' + str(row_total + 1))
    cell_list = wks.range('A2:J' + str(row_total + 1))
    # Columns B..I in sheet order; column A is the running index and column
    # J is the update timestamp.  Replaces the old 10-branch elif chain.
    field_order = ('Order ID', 'Title', 'Tracking ID', 'Tracking Status',
                   'Status', 'Order Date', 'Days Left', 'Price')
    for offset, cell in enumerate(cell_list):
        row, col = divmod(offset, 10)
        if col == 0:
            cell.value = str(row + 1)
        elif col <= 8:
            cell.value = list_rec[row][field_order[col - 1]]
        else:  # col == 9: "Updated On"
            cell.value = str(datetime.datetime.now())
    wks.update_cells(cell_list)  # single batched write
def _days_left(order):
    """Return the compacted "Ndays" remainder of the countdown banner.

    NOTE(review): str.strip removes a *character set*, not a prefix; this
    reproduces the original behaviour rather than fixing it, so the sheet
    output stays unchanged.
    """
    banner = order['status_days_left']
    return ''.join(
        banner.strip('Your order will be closed in:').strip().split(' '))


def _collect_rows(batch, orders, days_override=None):
    """Append one sheet row per item of each order in *orders*.

    Malformed orders are logged and skipped (log-and-continue), matching the
    original best-effort behaviour.  days_override replaces the computed
    countdown (used for completed orders).
    """
    for order in orders:
        for item in order['product_list']:
            try:
                if days_override is None:
                    days = _days_left(order)
                else:
                    days = days_override
                batch.append(create_order_dict(
                    order['order_id'],
                    item['title'],
                    order['tracking_id'],
                    order['tracking_status'],
                    order['status'],
                    order['order_dt'],
                    days,
                    item['amount'],
                    str(datetime.datetime.now())))
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt and
                # SystemExit still propagate.
                print(item.keys())
                import sys
                print(sys.exc_info())
                print('Error in saving order' + order['order_id'])


def save_aliexpress_orders(dict_orders):
    """Flatten scraped AliExpress orders into rows and batch-write the sheet.

    dict_orders maps a status bucket name ('Not Shipped', 'Shipped',
    'Order Awaiting Payment', 'Order Completed') to a list of order dicts;
    missing buckets are treated as empty.
    """
    awaiting_shipment = dict_orders.get('Not Shipped', [])
    awaiting_delivery = dict_orders.get('Shipped', [])
    awaiting_payment = dict_orders.get('Order Awaiting Payment', [])
    completed = dict_orders.get('Order Completed', [])
    # tracking, carrier and status are item-wise, not order-wise.
    batch_save_list = []
    # Not-yet-shipped orders have no tracking info and no countdown.
    for order in awaiting_shipment:
        for item in order['product_list']:
            batch_save_list.append(create_order_dict(
                order['order_id'],
                item['title'],
                '',
                '',
                order['status'],
                order['order_dt'],
                '',
                item['amount'],
                str(datetime.datetime.now())))
    _collect_rows(batch_save_list, awaiting_delivery)
    _collect_rows(batch_save_list, awaiting_payment)
    _collect_rows(batch_save_list, completed, days_override='0 days')
    batch_update_gsheet(URL, SHEET_NAME, batch_save_list,
                        str(datetime.datetime.now()))
if __name__ == '__main__':
    # Manual smoke test: writes four dummy rows to the configured sheet.
    # clear_google_sheet(URL, SHEET_NAME)
    # add_record_from_dict(URL,SHEET_NAME,create_order_dict('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', str(datetime.datetime.now())))
    batch_update_gsheet(URL,
                        SHEET_NAME, [
                        create_order_dict('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', str(datetime.datetime.now())),
                        create_order_dict('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', str(datetime.datetime.now())),
                        create_order_dict('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', str(datetime.datetime.now())),
                        create_order_dict('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', str(datetime.datetime.now()))],
                        str(datetime.datetime.now()))
|
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WMain/IOST_WMainI2C.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Basic import *
from IOST_Config import *
from IOST_Testcase import *
import gtk
import gtk.glade
import gobject
#======================================================================
try:
    # Probe whether the global debug flag was defined by an earlier import;
    # a bare name reference raises NameError when it was not.
    IOST_DBG_EN
    if IOST_DBG_EN:
        IOST_WMainI2C_DebugEnable = 1
    else:
        IOST_WMainI2C_DebugEnable = 0
except NameError:
    # Bug fix: was a bare `except:`; only the missing-name case is expected.
    IOST_DBG_EN = False
    IOST_WMainI2C_DebugEnable = 0
#======================================================================
class IOST_WMain_I2C():
    """
    Controller for all I2C components on the IOST WMain window.

    This class is mixed into the main window object and therefore expects
    the host object to provide ``IOST_Data`` (test-configuration dict),
    ``IOST_Objs`` (per-window widget dicts) and the
    ``WSetupTestcase_SetupTestcase`` helper.
    """
    #----------------------------------------------------------------------
    def __init__(self, glade_filename, window_name, builder=None):
        """Remember the window name and load (or reuse) the Glade builder."""
        self.IOST_WMainI2C_WindowName = window_name
        if not builder:
            # Stand-alone use: build the UI and hook up the signal handlers.
            self.IOST_WMainI2C_Builder = gtk.Builder()
            self.IOST_WMainI2C_Builder.add_from_file(glade_filename)
            self.IOST_WMainI2C_Builder.connect_signals(self)
        else:
            self.IOST_WMainI2C_Builder = builder
    #----------------------------------------------------------------------
    def SetValueToI2C_Obj(self, window_name):
        """
        Init all I2C widgets from IOST_Data when the IOST WMain program starts.
        """
        if self.IOST_Data["I2C"] == "Enable":
            self.IOST_Objs[window_name]["_IP_Enable_I2C_CB"].set_active(True)
            for i in range(0, self.IOST_Data["I2C_PortNum"]):
                # Element 0 of each per-port list is its Enable/Disable flag.
                enabled = self.IOST_Data["I2C" + str(i)][0] != "Disable"
                self.IOST_Objs[window_name]["_Config_I2C" + str(i) + "_CB"].set_active(enabled)
                self.IOST_Objs[window_name]["_Config_I2C" + str(i) + "_B"].set_sensitive(enabled)
        else:
            self.IOST_Objs[window_name]["_IP_Enable_I2C_CB"].set_active(False)
            for i in range(0, self.IOST_Data["I2C_PortNum"]):
                self.IOST_Objs[window_name]["_Config_I2C" + str(i) + "_CB"].set_sensitive(False)
                self.IOST_Objs[window_name]["_Config_I2C" + str(i) + "_B"].set_sensitive(False)
        # Update test case counts: first list entry is the flag, the rest
        # are the configured test cases for that port.
        for i in range(0, self.IOST_Data["I2C_PortNum"]):
            self.IOST_Data["I2C" + str(i) + "_TestCaseNum"] = len(self.IOST_Data["I2C" + str(i)]) - 1
    #----------------------------------------------------------------------
    def _I2C_ConfigClicked(self, port):
        """Shared body of the per-port Config button handlers."""
        self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C" + str(port))
    #----------------------------------------------------------------------
    def _I2C_ConfigToggled(self, port):
        """Shared body of the per-port Enable checkbox handlers."""
        name = "I2C" + str(port)
        Res = self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_Config_" + name + "_CB"].get_active()
        # The Config button is only usable while its port is enabled.
        self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_Config_" + name + "_B"].set_sensitive(Res)
        self.IOST_Data[name][0] = 'Enable' if Res else 'Disable'
        if IOST_WMainI2C_DebugEnable:
            iost_print(IOST_DBG_L06, self.IOST_Data[name][0], "IOST_Data->" + name + "_Enable")
    #----------------------------------------------------------------------
    # The on_* names below are bound by name via connect_signals(), so each
    # port keeps its own thin wrapper that delegates to the shared helpers.
    def on_IOST_WMain_Config_I2C0_B_clicked(self, object, data=None):
        "Control to ConfigI2C-0 button "
        self._I2C_ConfigClicked(0)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C0_C_toggled(self, object, data=None):
        "ConfigI2C-0 enable checkbox"
        self._I2C_ConfigToggled(0)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C1_B_clicked(self, object, data=None):
        "Control to ConfigI2C-1 button "
        self._I2C_ConfigClicked(1)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C1_C_toggled(self, object, data=None):
        "ConfigI2C-1 enable checkbox"
        self._I2C_ConfigToggled(1)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C2_B_clicked(self, object, data=None):
        "Control to ConfigI2C-2 button "
        self._I2C_ConfigClicked(2)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C2_C_toggled(self, object, data=None):
        "ConfigI2C-2 enable checkbox"
        self._I2C_ConfigToggled(2)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C3_B_clicked(self, object, data=None):
        "Control to ConfigI2C-3 button "
        self._I2C_ConfigClicked(3)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C3_C_toggled(self, object, data=None):
        "ConfigI2C-3 enable checkbox"
        self._I2C_ConfigToggled(3)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C4_B_clicked(self, object, data=None):
        "Control to ConfigI2C-4 button "
        self._I2C_ConfigClicked(4)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C4_C_toggled(self, object, data=None):
        "ConfigI2C-4 enable checkbox"
        self._I2C_ConfigToggled(4)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C5_B_clicked(self, object, data=None):
        "Control to ConfigI2C-5 button "
        self._I2C_ConfigClicked(5)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C5_C_toggled(self, object, data=None):
        "ConfigI2C-5 enable checkbox"
        self._I2C_ConfigToggled(5)
    #----------------------------------------------------------------------
    def on_IOST_WMain_IP_Enable_I2C_CB_toggled(self, object, data=None):
        """Master I2C checkbox: enable/disable the whole I2C group."""
        Res = self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_IP_Enable_I2C_CB"].get_active()
        self.IOST_WMain_I2C_set_sensitive_all(Res)
        if Res:
            self.IOST_Data["I2C"] = 'Enable'
        else:
            self.IOST_Data["I2C"] = 'Disable'
    #----------------------------------------------------------------------
    def IOST_WMain_I2C_set_sensitive_all(self, value):
        """Set sensitivity of every per-port widget.

        A port's Config button only becomes sensitive when the whole group
        is enabled AND that port itself is marked Enable.
        """
        for i in range(0, self.IOST_Data["I2C_PortNum"]):
            self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_Config_I2C"+str(i)+"_CB"].set_sensitive(value)
            if self.IOST_Data["I2C"+str(i)][0] == "Enable" and value:
                self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_Config_I2C"+str(i)+"_B"].set_sensitive(value)
            else:
                self.IOST_Objs[self.IOST_WMainI2C_WindowName]["_Config_I2C"+str(i)+"_B"].set_sensitive(False)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is the main executable of Maraschino. It parses the command line arguments, does init and calls the start function of Maraschino."""
import sys
import os
# Check if frozen by py2exe
def check_frozen():
    """Return True when running inside a py2exe-frozen executable."""
    return getattr(sys, 'frozen', None) is not None
def get_rundir():
    """Return Maraschino's run directory as an absolute path.

    When frozen by py2exe the directory is derived from ``sys.executable``;
    otherwise it is the directory containing this script.

    The previous implementation stripped a hard-coded 13 characters
    (``len('Maraschino.py')``) from the script path — silently wrong if the
    file is ever renamed — and called the Python-2-only ``unicode`` builtin
    in the frozen branch, which raises NameError on Python 3.
    """
    if check_frozen():
        exe = sys.executable
        if isinstance(exe, bytes):
            # Python 2: decode using the filesystem encoding, as before.
            exe = exe.decode(sys.getfilesystemencoding())
        return os.path.abspath(exe)
    return os.path.dirname(os.path.abspath(__file__))
# Set the rundir
rundir = get_rundir()
# Include paths
# NOTE: 'lib' is inserted second, so it ends up FIRST on sys.path and wins
# name lookups over the run directory itself.
sys.path.insert(0, rundir)
sys.path.insert(0, os.path.join(rundir, 'lib'))
# Create Flask instance
from flask import Flask
app = Flask(__name__)
# If frozen, we need define static and template paths
# (presumably because a py2exe bundle can't resolve package-relative
# static/template folders — point Flask at the dirs next to the exe instead;
# TODO confirm against the py2exe build).
if check_frozen():
    app.root_path = rundir
    app.static_path = '/static'
    app.add_url_rule(
        app.static_path + '/<path:filename>',
        endpoint='static',
        view_func=app.send_static_file
    )
    from jinja2 import FileSystemLoader
    app.jinja_loader = FileSystemLoader(os.path.join(rundir, 'templates'))
def import_modules():
    """Import every module available in Maraschino.

    The imports are performed purely for their side effects — presumably
    each module registers its routes/hooks with the Flask ``app`` at import
    time (TODO confirm).  They are kept explicit and one-per-line so a
    missing module fails loudly with a clear traceback.
    """
    import modules.applications
    import modules.controls
    import modules.couchpotato
    import modules.currently_playing
    import modules.diskspace
    import modules.headphones
    import modules.index
    import modules.ipcamera
    import modules.library
    import modules.log
    import modules.nzbget
    import modules.recently_added
    import modules.remote
    import modules.rtorrentdl
    import modules.sabnzbd
    import modules.script_launcher
    import modules.search
    import modules.sickbeard
    import modules.trakt
    import modules.traktplus
    import modules.transmission
    import modules.updater
    import modules.utorrent
    import modules.weather
    import modules.xbmc_notify
    import modules.sonarr
    import mobile
    import xbmcmm
@app.teardown_request
def shutdown_session(exception=None):
    """Teardown hook: remove the database session after every request.

    Flask invokes this when a request context is popped; ``exception`` is
    the unhandled error if one occurred (unused — the session is removed
    either way).  The import is local so the database layer is only touched
    once it has been initialized.
    """
    from maraschino.database import db_session
    db_session.remove()
import maraschino
def main():
    """Entry point: parse command-line options, push the resulting
    configuration into the ``maraschino`` module globals, then initialize
    and start the server (daemonizing first when requested)."""
    from optparse import OptionParser

    parser = OptionParser()
    # Command-line options, declared as data and registered in one loop.
    option_specs = (
        (('-p', '--port'),
         dict(dest='port', default=None,
              help="Force webinterface to listen on this port")),
        (('-d', '--daemon'),
         dict(dest='daemon', action='store_true', help='Run as a daemon')),
        (('--pidfile',),
         dict(dest='pidfile',
              help='Create a pid file (only relevant when running as a daemon)')),
        (('--log',),
         dict(dest='log', help='Create a log file at a desired location')),
        (('-v', '--verbose'),
         dict(dest='verbose', action='store_true',
              help='Silence the logger')),
        (('--develop',),
         dict(action="store_true", dest='develop',
              help="Start instance of development server")),
        (('--database',),
         dict(dest='database', help='Custom database file location')),
        (('--webroot',),
         dict(dest='webroot', help='Web root for Maraschino')),
        (('--host',),
         dict(dest='host', help='Web host for Maraschino')),
        (('--kiosk',),
         dict(dest='kiosk', action='store_true',
              help='Disable settings in the UI')),
        (('--datadir',),
         dict(dest='datadir', help='Write program data to custom location')),
        (('--noupdate',),
         dict(action="store_true", dest='noupdate',
              help='Disable the internal updater')),
    )
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)

    # Parse the command line for the options defined above.
    opts, _args = parser.parse_args()

    data_dir = opts.datadir if opts.datadir else rundir

    if opts.daemon:
        maraschino.DAEMON = True
        maraschino.VERBOSE = False
    if opts.pidfile:
        maraschino.PIDFILE = opts.pidfile
        maraschino.VERBOSE = False

    port = int(opts.port) if opts.port else 7000

    if opts.log:
        maraschino.LOG_FILE = opts.log
    # -v is applied after the daemon/pidfile handling so it wins over the
    # VERBOSE = False those branches set.
    if opts.verbose:
        maraschino.VERBOSE = True
    if opts.develop:
        maraschino.DEVELOPMENT = True

    database = opts.database if opts.database \
        else os.path.join(data_dir, 'maraschino.db')

    if opts.webroot:
        maraschino.WEBROOT = opts.webroot
    if opts.host:
        maraschino.HOST = opts.host
    if opts.kiosk:
        maraschino.KIOSK = True
    if opts.noupdate:
        maraschino.UPDATER = False

    # Publish the resolved configuration to the maraschino module.
    maraschino.RUNDIR = rundir
    maraschino.DATA_DIR = data_dir
    maraschino.FULL_PATH = os.path.join(rundir, 'Maraschino.py')
    maraschino.ARGS = sys.argv[1:]
    maraschino.PORT = port
    maraschino.DATABASE = database

    maraschino.initialize()
    if maraschino.PIDFILE or maraschino.DAEMON:
        maraschino.daemonize()
    import_modules()
    maraschino.init_updater()
    maraschino.start()
# Run Maraschino only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of Paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from binascii import hexlify
import errno
import os
import stat
import threading
import time
import weakref
from paramiko import util
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.common import INFO, DEBUG, o777
from paramiko.py3compat import bytestring, b, u, long
from paramiko.sftp import (
BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, CMD_NAME,
CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE,
SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE,
CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT,
CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS,
CMD_EXTENDED, SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED,
)
from paramiko.sftp_attr import SFTPAttributes
from paramiko.ssh_exception import SSHException
from paramiko.sftp_file import SFTPFile
from paramiko.util import ClosingContextManager
def _to_unicode(s):
"""
decode a string as ascii or utf8 if possible (as required by the sftp
protocol). if neither works, just return a byte string because the server
probably doesn't know the filename's encoding.
"""
try:
return s.encode('ascii')
except (UnicodeError, AttributeError):
try:
return s.decode('utf-8')
except UnicodeError:
return s
# Path separator as bytes, for joining remote paths kept in bytes form.
b_slash = b'/'
class SFTPClient(BaseSFTP, ClosingContextManager):
"""
SFTP client object.
Used to open an SFTP session across an open SSH `.Transport` and perform
remote file operations.
Instances of this class may be used as context managers.
"""
    def __init__(self, sock):
        """
        Create an SFTP client from an existing `.Channel`.  The channel
        should already have requested the ``"sftp"`` subsystem.

        An alternate way to create an SFTP client context is by using
        `from_transport`.

        :param .Channel sock: an open `.Channel` using the ``"sftp"``
            subsystem
        :raises:
            `.SSHException` -- if there's an exception while negotiating
            sftp
        """
        BaseSFTP.__init__(self)
        self.sock = sock
        self.ultra_debug = False
        # Request ids start at 1; responses are matched against them.
        self.request_number = 1
        # lock for request_number
        self._lock = threading.Lock()
        # Emulated working directory; None until chdir() is used.
        self._cwd = None
        # request # -> SFTPFile
        # Weak values, so an abandoned SFTPFile can be collected even while
        # a response for it is still outstanding.
        self._expecting = weakref.WeakValueDictionary()
        if type(sock) is Channel:
            # override default logger
            # NOTE(review): exact-type check (not isinstance) — presumably
            # deliberate so only true Channel objects get the
            # transport-scoped logger; confirm before changing.
            transport = self.sock.get_transport()
            self.logger = util.get_logger(
                transport.get_log_channel() + '.sftp')
            self.ultra_debug = transport.get_hexdump()
        try:
            server_version = self._send_version()
        except EOFError:
            # Peer closed the connection during version negotiation.
            raise SSHException('EOF during negotiation')
        self._log(
            INFO,
            'Opened sftp connection (server version {})'.format(server_version)
        )
@classmethod
def from_transport(cls, t, window_size=None, max_packet_size=None):
"""
Create an SFTP client channel from an open `.Transport`.
Setting the window and packet sizes might affect the transfer speed.
The default settings in the `.Transport` class are the same as in
OpenSSH and should work adequately for both files transfers and
interactive sessions.
:param .Transport t: an open `.Transport` which is already
authenticated
:param int window_size:
optional window size for the `.SFTPClient` session.
:param int max_packet_size:
optional max packet size for the `.SFTPClient` session..
:return:
a new `.SFTPClient` object, referring to an sftp session (channel)
across the transport
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
chan = t.open_session(window_size=window_size,
max_packet_size=max_packet_size)
if chan is None:
return None
chan.invoke_subsystem('sftp')
return cls(chan)
def _log(self, level, msg, *args):
if isinstance(msg, list):
for m in msg:
self._log(level, m, *args)
else:
# NOTE: these bits MUST continue using %-style format junk because
# logging.Logger.log() explicitly requires it. Grump.
# escape '%' in msg (they could come from file or directory names)
# before logging
msg = msg.replace('%', '%%')
super(SFTPClient, self)._log(
level,
"[chan %s] " + msg, *([self.sock.get_name()] + list(args)))
def close(self):
"""
Close the SFTP session and its underlying channel.
.. versionadded:: 1.4
"""
self._log(INFO, 'sftp session closed.')
self.sock.close()
def get_channel(self):
"""
Return the underlying `.Channel` object for this SFTP session. This
might be useful for doing things like setting a timeout on the channel.
.. versionadded:: 1.7.1
"""
return self.sock
def listdir(self, path='.'):
"""
Return a list containing the names of the entries in the given
``path``.
The list is in arbitrary order. It does not include the special
entries ``'.'`` and ``'..'`` even if they are present in the folder.
This method is meant to mirror ``os.listdir`` as closely as possible.
For a list of full `.SFTPAttributes` objects, see `listdir_attr`.
:param str path: path to list (defaults to ``'.'``)
"""
return [f.filename for f in self.listdir_attr(path)]
def listdir_attr(self, path='.'):
"""
Return a list containing `.SFTPAttributes` objects corresponding to
files in the given ``path``. The list is in arbitrary order. It does
not include the special entries ``'.'`` and ``'..'`` even if they are
present in the folder.
The returned `.SFTPAttributes` objects will each have an additional
field: ``longname``, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will probably
depend on the SFTP server implementation.
:param str path: path to list (defaults to ``'.'``)
:return: list of `.SFTPAttributes` objects
.. versionadded:: 1.2
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'listdir({!r})'.format(path))
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_binary()
filelist = []
while True:
try:
t, msg = self._request(CMD_READDIR, handle)
except EOFError:
# done with handle
break
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
for i in range(count):
filename = msg.get_text()
longname = msg.get_text()
attr = SFTPAttributes._from_msg(msg, filename, longname)
if (filename != '.') and (filename != '..'):
filelist.append(attr)
self._request(CMD_CLOSE, handle)
return filelist
    def listdir_iter(self, path='.', read_aheads=50):
        """
        Generator version of `.listdir_attr` — yields `.SFTPAttributes`
        one at a time instead of building the whole list.

        ``read_aheads`` controls how many ``SSH_FXP_READDIR`` requests are
        pipelined to the server per batch; the default of 50 suffices for
        most listings since each response may contain multiple files
        (dependent on server implementation).

        .. versionadded:: 1.15
        """
        path = self._adjust_cwd(path)
        self._log(DEBUG, 'listdir({!r})'.format(path))
        t, msg = self._request(CMD_OPENDIR, path)
        if t != CMD_HANDLE:
            raise SFTPError('Expected handle')
        handle = msg.get_string()
        nums = list()
        while True:
            try:
                # Send out a bunch of readdir requests so that we can read the
                # responses later on Section 6.7 of the SSH file transfer RFC
                # explains this
                # http://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
                for i in range(read_aheads):
                    num = self._async_request(type(None), CMD_READDIR, handle)
                    nums.append(num)
                # For each of our sent requests
                # Read and parse the corresponding packets
                # If we're at the end of our queued requests, then fire off
                # some more requests
                # Exit the loop when we've reached the end of the directory
                # handle
                for num in nums:
                    t, pkt_data = self._read_packet()
                    msg = Message(pkt_data)
                    new_num = msg.get_int()
                    # NOTE(review): a response whose id doesn't match the
                    # expected request id is silently dropped here — confirm
                    # this is intended before relying on strict ordering.
                    if num == new_num:
                        if t == CMD_STATUS:
                            # Raises (e.g. EOFError at end of directory).
                            self._convert_status(msg)
                        count = msg.get_int()
                        for i in range(count):
                            filename = msg.get_text()
                            longname = msg.get_text()
                            attr = SFTPAttributes._from_msg(
                                msg, filename, longname)
                            if (filename != '.') and (filename != '..'):
                                yield attr
                    # If we've hit the end of our queued requests, reset nums.
                    # NOTE(review): this reset sits inside the for-loop, so it
                    # runs after EVERY response, not just the last — verify.
                    nums = list()
            except EOFError:
                # End of directory: close the handle and finish the generator.
                self._request(CMD_CLOSE, handle)
                return
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote server. The arguments are the same as for
Python's built-in `python:file` (aka `python:open`). A file-like
object is returned, which closely mimics the behavior of a normal
Python file object, including the ability to be used as a context
manager.
The mode indicates how the file is to be opened: ``'r'`` for reading,
``'w'`` for writing (truncating an existing file), ``'a'`` for
appending, ``'r+'`` for reading/writing, ``'w+'`` for reading/writing
(truncating an existing file), ``'a+'`` for reading/appending. The
Python ``'b'`` flag is ignored, since SSH treats all files as binary.
The ``'U'`` flag is supported in a compatible way.
Since 1.5.2, an ``'x'`` flag indicates that the operation should only
succeed if the file was created and did not previously exist. This has
no direct mapping to Python's file flags, but is commonly known as the
``O_EXCL`` flag in posix.
The file will be buffered in standard Python style by default, but
can be altered with the ``bufsize`` parameter. ``0`` turns off
buffering, ``1`` uses line buffering, and any number greater than 1
(``>1``) uses that specific buffer size.
:param str filename: name of the file to open
:param str mode: mode (Python-style) to open in
:param int bufsize: desired buffering (-1 = default buffer size)
:return: an `.SFTPFile` object representing the open file
:raises: ``IOError`` -- if the file could not be opened.
"""
filename = self._adjust_cwd(filename)
self._log(DEBUG, 'open({!r}, {!r})'.format(filename, mode))
imode = 0
if ('r' in mode) or ('+' in mode):
imode |= SFTP_FLAG_READ
if ('w' in mode) or ('+' in mode) or ('a' in mode):
imode |= SFTP_FLAG_WRITE
if 'w' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
if 'a' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
if 'x' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
attrblock = SFTPAttributes()
t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_binary()
self._log(
DEBUG,
'open({!r}, {!r}) -> {}'.format(filename, mode, u(hexlify(handle)))
)
return SFTPFile(self, handle, mode, bufsize)
# Python continues to vacillate about "open" vs "file"...
file = open
def remove(self, path):
"""
Remove the file at the given path. This only works on files; for
removing folders (directories), use `rmdir`.
:param str path: path (absolute or relative) of the file to remove
:raises: ``IOError`` -- if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'remove({!r})'.format(path))
self._request(CMD_REMOVE, path)
unlink = remove
def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, 'rename({!r}, {!r})'.format(oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath)
def posix_rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
:versionadded: 2.2
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, 'posix_rename({!r}, {!r})'.format(oldpath, newpath))
self._request(
CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath
)
def mkdir(self, path, mode=o777):
"""
Create a folder (directory) named ``path`` with numeric mode ``mode``.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
:param str path: name of the folder to create
:param int mode: permissions (posix-style) for the newly-created folder
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'mkdir({!r}, {!r})'.format(path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_MKDIR, path, attr)
def rmdir(self, path):
"""
Remove the folder named ``path``.
:param str path: name of the folder to remove
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'rmdir({!r})'.format(path))
self._request(CMD_RMDIR, path)
def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
Python's ``stat`` structure as returned by ``os.stat``, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a Python `python:stat` object, the result may not be accessed as
a tuple. This is mostly due to the author's slack factor.
The fields supported are: ``st_mode``, ``st_size``, ``st_uid``,
``st_gid``, ``st_atime``, and ``st_mtime``.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat({!r})'.format(path))
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def lstat(self, path):
"""
Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as `stat`.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'lstat({!r})'.format(path))
t, msg = self._request(CMD_LSTAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def symlink(self, source, dest):
"""
Create a symbolic link to the ``source`` path at ``destination``.
:param str source: path of the original file
:param str dest: path of the newly created symlink
"""
dest = self._adjust_cwd(dest)
self._log(DEBUG, 'symlink({!r}, {!r})'.format(source, dest))
source = bytestring(source)
self._request(CMD_SYMLINK, source, dest)
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by Python's `os.chmod`
function.
:param str path: path of the file to change the permissions of
:param int mode: new permissions
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod({!r}, {!r})'.format(path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr)
def chown(self, path, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of a file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param str path: path of the file to change the owner and group of
:param int uid: new owner's uid
:param int gid: new group id
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chown({!r}, {!r}, {!r})'.format(path, uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self._request(CMD_SETSTAT, path, attr)
def utime(self, path, times):
"""
Set the access and modified times of the file specified by ``path``.
If ``times`` is ``None``, then the file's access and modified times
are set to the current time. Otherwise, ``times`` must be a 2-tuple
of numbers, of the form ``(atime, mtime)``, which is used to set the
access and modified times, respectively. This bizarre API is mimicked
from Python for the sake of consistency -- I apologize.
:param str path: path of the file to modify
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
internet epoch time (seconds since 01 January 1970 GMT)
"""
path = self._adjust_cwd(path)
if times is None:
times = (time.time(), time.time())
self._log(DEBUG, 'utime({!r}, {!r})'.format(path, times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self._request(CMD_SETSTAT, path, attr)
def truncate(self, path, size):
"""
Change the size of the file specified by ``path``. This usually
extends or shrinks the size of the file, just like the `~file.truncate`
method on Python file objects.
:param str path: path of the file to modify
:param int size: the new size of the file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'truncate({!r}, {!r})'.format(path, size))
attr = SFTPAttributes()
attr.st_size = size
self._request(CMD_SETSTAT, path, attr)
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
`symlink` to create these. The result may be either an absolute or
relative pathname.
:param str path: path of the symbolic link file
:return: target path, as a `str`
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink({!r})'.format(path))
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned {} results'.format(count))
return _to_unicode(msg.get_string())
def normalize(self, path):
"""
Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing ``'.'``
as ``path``).
:param str path: path to be normalized
:return: normalized form of the given path (as a `str`)
:raises: ``IOError`` -- if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'normalize({!r})'.format(path))
t, msg = self._request(CMD_REALPATH, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count != 1:
raise SFTPError('Realpath returned {} results'.format(count))
return msg.get_text()
def chdir(self, path=None):
"""
Change the "current directory" of this SFTP session. Since SFTP
doesn't really have the concept of a current working directory, this is
emulated by Paramiko. Once you use this method to set a working
directory, all operations on this `.SFTPClient` object will be relative
to that path. You can pass in ``None`` to stop using a current working
directory.
:param str path: new current working directory
:raises:
``IOError`` -- if the requested path doesn't exist on the server
.. versionadded:: 1.4
"""
if path is None:
self._cwd = None
return
if not stat.S_ISDIR(self.stat(path).st_mode):
code = errno.ENOTDIR
raise SFTPError(
code, "{}: {}".format(os.strerror(code), path)
)
self._cwd = b(self.normalize(path))
    def getcwd(self):
        """
        Return the "current working directory" emulated for this SFTP
        session, or ``None`` if no directory has been set with `chdir`.

        .. versionadded:: 1.4
        """
        # TODO: make class initialize with self._cwd set to self.normalize('.')
        # _cwd is stored as bytes; the `and` short-circuits to None when it
        # has never been set, otherwise the value is decoded for the caller.
        return self._cwd and u(self._cwd)
def _transfer_with_callback(self, reader, writer, file_size, callback):
size = 0
while True:
data = reader.read(32768)
writer.write(data)
size += len(data)
if len(data) == 0:
break
if callback is not None:
callback(size, file_size)
return size
def putfo(self, fl, remotepath, file_size=0, callback=None, confirm=True):
"""
Copy the contents of an open file object (``fl``) to the SFTP server as
``remotepath``. Any exception raised by operations will be passed
through.
The SFTP operations use pipelining for speed.
:param fl: opened file or file-like object to copy
:param str remotepath: the destination path on the SFTP server
:param int file_size:
optional size parameter passed to callback. If none is specified,
size defaults to 0
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
(since 1.7.4)
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size (since 1.7.7)
:return:
an `.SFTPAttributes` object containing attributes about the given
file.
.. versionadded:: 1.10
"""
with self.file(remotepath, 'wb') as fr:
fr.set_pipelined(True)
size = self._transfer_with_callback(
reader=fl, writer=fr, file_size=file_size, callback=callback
)
if confirm:
s = self.stat(remotepath)
if s.st_size != size:
raise IOError(
'size mismatch in put! {} != {}'.format(s.st_size, size))
else:
s = SFTPAttributes()
return s
def put(self, localpath, remotepath, callback=None, confirm=True):
"""
Copy a local file (``localpath``) to the SFTP server as ``remotepath``.
Any exception raised by operations will be passed through. This
method is primarily provided as a convenience.
The SFTP operations use pipelining for speed.
:param str localpath: the local file to copy
:param str remotepath: the destination path on the SFTP server. Note
that the filename should be included. Only specifying a directory
may result in an error.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:return: an `.SFTPAttributes` object containing attributes about the
given file
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
``callback`` and rich attribute return value added.
.. versionchanged:: 1.7.7
``confirm`` param added.
"""
file_size = os.stat(localpath).st_size
with open(localpath, 'rb') as fl:
return self.putfo(fl, remotepath, file_size, callback, confirm)
    def getfo(self, remotepath, fl, callback=None):
        """
        Copy a remote file (``remotepath``) from the SFTP server and write to
        an open file or file-like object, ``fl``. Any exception raised by
        operations will be passed through. This method is primarily provided
        as a convenience.

        :param str remotepath: the remote file to copy
        :param object fl:
            an opened file (or file-like) object, to which the remote file's
            contents are written
        :param callable callback:
            optional callback function (form: ``func(int, int)``) that accepts
            the bytes transferred so far and the total bytes to be transferred
        :return: the `number <int>` of bytes written to the opened file object

        .. versionadded:: 1.10
        """
        file_size = self.stat(remotepath).st_size
        with self.open(remotepath, 'rb') as fr:
            # Issue pipelined read requests for the whole file up front.
            fr.prefetch(file_size)
            return self._transfer_with_callback(
                reader=fr, writer=fl, file_size=file_size, callback=callback
            )
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (``remotepath``) from the SFTP server to the local
host as ``localpath``. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
:param str remotepath: the remote file to copy
:param str localpath: the destination path on the local host
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
Added the ``callback`` param
"""
with open(localpath, 'wb') as fl:
size = self.getfo(remotepath, fl, callback)
s = os.stat(localpath)
if s.st_size != size:
raise IOError(
'size mismatch in get! {} != {}'.format(s.st_size, size))
# ...internals...
def _request(self, t, *arg):
num = self._async_request(type(None), t, *arg)
return self._read_response(num)
def _async_request(self, fileobj, t, *arg):
# this method may be called from other threads (prefetch)
self._lock.acquire()
try:
msg = Message()
msg.add_int(self.request_number)
for item in arg:
if isinstance(item, long):
msg.add_int64(item)
elif isinstance(item, int):
msg.add_int(item)
elif isinstance(item, SFTPAttributes):
item._pack(msg)
else:
# For all other types, rely on as_string() to either coerce
# to bytes before writing or raise a suitable exception.
msg.add_string(item)
num = self.request_number
self._expecting[num] = fileobj
self.request_number += 1
finally:
self._lock.release()
self._send_packet(t, msg)
return num
    def _read_response(self, waitfor=None):
        """
        Read incoming SFTP packets and dispatch them.

        If ``waitfor`` is a request number, loop until that request's
        response arrives and return ``(type, Message)`` for it; responses
        for other requests are dispatched to their file objects along the
        way. If ``waitfor`` is None, process at most one packet and return
        ``(None, None)``.

        :raises SSHException: if the server connection drops (EOF).
        """
        while True:
            try:
                t, data = self._read_packet()
            except EOFError as e:
                raise SSHException('Server connection dropped: {}'.format(e))
            msg = Message(data)
            num = msg.get_int()
            # Only the _expecting bookkeeping is done under the lock; the
            # response itself is processed after release.
            self._lock.acquire()
            try:
                if num not in self._expecting:
                    # might be response for a file that was closed before
                    # responses came back
                    self._log(DEBUG, 'Unexpected response #{}'.format(num))
                    if waitfor is None:
                        # just doing a single check
                        break
                    continue
                fileobj = self._expecting[num]
                del self._expecting[num]
            finally:
                self._lock.release()
            if num == waitfor:
                # synchronous
                if t == CMD_STATUS:
                    self._convert_status(msg)
                return t, msg
            # can not rewrite this to deal with E721, either as a None check
            # nor as not an instance of None or NoneType
            if fileobj is not type(None): # noqa
                fileobj._async_response(t, msg, num)
            if waitfor is None:
                # just doing a single check
                break
        return None, None
    def _finish_responses(self, fileobj):
        """
        Drain responses until none are outstanding for ``fileobj``, then
        re-raise any exception the file object recorded asynchronously.
        """
        while fileobj in self._expecting.values():
            self._read_response()
        fileobj._check_exception()
def _convert_status(self, msg):
"""
Raises EOFError or IOError on error status; otherwise does nothing.
"""
code = msg.get_int()
text = msg.get_text()
if code == SFTP_OK:
return
elif code == SFTP_EOF:
raise EOFError(text)
elif code == SFTP_NO_SUCH_FILE:
# clever idea from john a. meinel: map the error codes to errno
raise IOError(errno.ENOENT, text)
elif code == SFTP_PERMISSION_DENIED:
raise IOError(errno.EACCES, text)
else:
raise IOError(text)
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
path = b(path)
if self._cwd is None:
return path
if len(path) and path[0:1] == b_slash:
# absolute path
return path
if self._cwd == b_slash:
return self._cwd + path
return self._cwd + b_slash + path
class SFTP(SFTPClient):
    """
    An alias for `.SFTPClient` for backwards compatibility.
    """
    # Deprecated name kept so older callers importing SFTP keep working.
    pass
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the 'events' app.

    Adds four columns to the Event table (icaluid, cancelled, update_date,
    link). The ``models`` dict below is South's frozen ORM snapshot and is
    auto-generated; do not edit it by hand.
    """

    def forwards(self, orm):
        # Adding field 'Event.icaluid'
        db.add_column(u'events_event', 'icaluid',
                      self.gf('django.db.models.fields.TextField')(unique=True, null=True),
                      keep_default=False)
        # Adding field 'Event.cancelled'
        db.add_column(u'events_event', 'cancelled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Event.update_date'
        db.add_column(u'events_event', 'update_date',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Event.link'
        db.add_column(u'events_event', 'link',
                      self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Event.icaluid'
        db.delete_column(u'events_event', 'icaluid')
        # Deleting field 'Event.cancelled'
        db.delete_column(u'events_event', 'cancelled')
        # Deleting field 'Event.update_date'
        db.delete_column(u'events_event', 'update_date')
        # Deleting field 'Event.link'
        db.delete_column(u'events_event', 'link')

    # Frozen ORM state (auto-generated by South).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'events.event': {
            'Meta': {'object_name': 'Event'},
            'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'icaluid': ('django.db.models.fields.TextField', [], {'unique': 'True', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'what': ('django.db.models.fields.TextField', [], {}),
            'when': ('django.db.models.fields.DateTimeField', [], {}),
            'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'where': ('django.db.models.fields.TextField', [], {'default': "u'earth'"}),
            'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['persons.Person']", 'null': 'True', 'symmetrical': 'False'}),
            'why': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        u'mks.knesset': {
            'Meta': {'object_name': 'Knesset'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'mks.member': {
            'Meta': {'ordering': "['name']", 'object_name': 'Member'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['mks.Party']"}),
            'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
            'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': u"orm['mks.Membership']", 'to': u"orm['mks.Party']"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'mks.membership': {
            'Meta': {'object_name': 'Membership'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']"}),
            'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'mks.party': {
            'Meta': {'ordering': "('-number_of_seats',)", 'unique_together': "(('knesset', 'name'),)", 'object_name': 'Party'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parties'", 'null': 'True', 'to': u"orm['mks.Knesset']"}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'split_from': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']", 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'persons.person': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'calendar_sync_token': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'calendar_url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': u"orm['mks.Member']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['persons.Title']"}),
            'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'persons.title': {
            'Meta': {'object_name': 'Title'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        u'planet.blog': {
            'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
        }
    }

    complete_apps = ['events']
|
|
#!/usr/bin/env python
"""Unit tests run as PYTHONPATH=.. python3 ./test_valve.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from functools import partial
import ipaddress
import logging
import os
import unittest
import tempfile
import shutil
import socket
import time
from ryu.controller import dpset
from ryu.controller.ofp_event import EventOFPMsgBase
from ryu.lib import mac
from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, lldp, slow, packet, vlan
from ryu.ofproto import ether, inet
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
from prometheus_client import CollectorRegistry
from beka.route import RouteAddition, RouteRemoval
from beka.ip import IPAddress, IPPrefix
from faucet import faucet
from faucet import faucet_bgp
from faucet import faucet_dot1x
from faucet import faucet_experimental_api
from faucet import faucet_experimental_event
from faucet import faucet_metrics
from faucet import valves_manager
from faucet import valve_of
from faucet import valve_packet
from faucet import valve_util
from faucet.valve import TfmValve
from fakeoftable import FakeOFTable
FAUCET_MAC = '0e:00:00:00:00:01'
# TODO: fix fake OF table implementation for in_port filtering
# (ie. do not output to in_port)
DP1_CONFIG = """
dp_id: 1
ignore_learn_ins: 100
combinatorial_port_flood: True
ofchannel_log: '/dev/null'
pipeline_config_dir: '%s/../etc/faucet'
packetin_pps: 99
lldp_beacon:
send_interval: 1
max_per_interval: 1
""" % os.path.dirname(os.path.realpath(__file__))
IDLE_DP1_CONFIG = """
use_idle_timeout: True
""" + DP1_CONFIG
GROUP_DP1_CONFIG = """
group_table: True
""" + DP1_CONFIG
GROUP_ROUTING_DP1_CONFIG = """
group_table_routing: True
""" + DP1_CONFIG
CONFIG = """
dps:
s1:
hardware: 'GenericTFM'
%s
interfaces:
p1:
number: 1
native_vlan: v100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
loop_protect: True
receive_lldp: True
max_hosts: 1
hairpin: True
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
loop_protect: True
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
tagged_vlans: [v300]
s2:
hardware: 'Open vSwitch'
dp_id: 0xdeadbeef
interfaces:
p1:
number: 1
native_vlan: v100
s3:
hardware: 'Open vSwitch'
combinatorial_port_flood: True
dp_id: 0x3
stack:
priority: 1
interfaces:
p1:
number: 1
native_vlan: v300
p2:
number: 2
native_vlan: v300
p3:
number: 3
native_vlan: v300
p4:
number: 4
native_vlan: v300
5:
stack:
dp: s4
port: 5
s4:
hardware: 'Open vSwitch'
dp_id: 0x4
interfaces:
p1:
number: 1
native_vlan: v300
p2:
number: 2
native_vlan: v300
p3:
number: 3
native_vlan: v300
p4:
number: 4
native_vlan: v300
5:
number: 5
stack:
dp: s3
port: 5
routers:
router1:
vlans: [v100, v200]
vlans:
v100:
vid: 0x100
targeted_gw_resolution: True
faucet_vips: ['10.0.0.254/24']
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
- route:
ip_dst: 10.99.98.0/24
ip_gw: 10.0.0.99
v200:
vid: 0x200
faucet_vips: ['fc00::1:254/112', 'fe80::1:254/64']
routes:
- route:
ip_dst: 'fc00::10:0/112'
ip_gw: 'fc00::1:1'
- route:
ip_dst: 'fc00::20:0/112'
ip_gw: 'fc00::1:99'
v300:
vid: 0x300
v400:
vid: 0x400
""" % DP1_CONFIG
def build_pkt(pkt):
    """Build and return a serialized packet from a dict of field values.

    The dict's keys select the packet type: ARP (arp_source_ip/arp_target_ip),
    ICMPv6/IPv6 (ipv6_src/ipv6_dst plus a payload selector), ICMP/IPv4
    (ipv4_src/ipv4_dst), LACP (actor_system/partner_system) or LLDP
    (chassis_id/port_id). 'vid' adds an 802.1Q tag. eth_src/eth_dst are
    required for all types.
    """
    def serialize(layers):
        """Concatenate packet layers and serialize."""
        result = packet.Packet()
        # Layers were appended innermost-first, so add them in reverse.
        for layer in reversed(layers):
            result.add_protocol(layer)
        result.serialize()
        return result
    layers = []
    assert 'eth_dst' in pkt and 'eth_src' in pkt
    ethertype = None
    if 'arp_source_ip' in pkt and 'arp_target_ip' in pkt:
        ethertype = ether.ETH_TYPE_ARP
        arp_code = arp.ARP_REQUEST
        # Frames addressed to FAUCET itself are built as ARP replies.
        if pkt['eth_dst'] == FAUCET_MAC:
            arp_code = arp.ARP_REPLY
        layers.append(arp.arp(
            src_ip=pkt['arp_source_ip'], dst_ip=pkt['arp_target_ip'], opcode=arp_code))
    elif 'ipv6_src' in pkt and 'ipv6_dst' in pkt:
        ethertype = ether.ETH_TYPE_IPV6
        # Payload selector keys pick the ICMPv6 message type.
        if 'router_solicit_ip' in pkt:
            layers.append(icmpv6.icmpv6(
                type_=icmpv6.ND_ROUTER_SOLICIT))
        elif 'neighbor_advert_ip' in pkt:
            layers.append(icmpv6.icmpv6(
                type_=icmpv6.ND_NEIGHBOR_ADVERT,
                data=icmpv6.nd_neighbor(
                    dst=pkt['neighbor_advert_ip'],
                    option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
        elif 'neighbor_solicit_ip' in pkt:
            layers.append(icmpv6.icmpv6(
                type_=icmpv6.ND_NEIGHBOR_SOLICIT,
                data=icmpv6.nd_neighbor(
                    dst=pkt['neighbor_solicit_ip'],
                    option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
        elif 'echo_request_data' in pkt:
            layers.append(icmpv6.icmpv6(
                type_=icmpv6.ICMPV6_ECHO_REQUEST,
                data=icmpv6.echo(id_=1, seq=1, data=pkt['echo_request_data'])))
        layers.append(ipv6.ipv6(
            src=pkt['ipv6_src'],
            dst=pkt['ipv6_dst'],
            nxt=inet.IPPROTO_ICMPV6))
    elif 'ipv4_src' in pkt and 'ipv4_dst' in pkt:
        ethertype = ether.ETH_TYPE_IP
        proto = inet.IPPROTO_IP
        if 'echo_request_data' in pkt:
            echo = icmp.echo(id_=1, seq=1, data=pkt['echo_request_data'])
            layers.append(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST, data=echo))
            proto = inet.IPPROTO_ICMP
        net = ipv4.ipv4(src=pkt['ipv4_src'], dst=pkt['ipv4_dst'], proto=proto)
        layers.append(net)
    elif 'actor_system' in pkt and 'partner_system' in pkt:
        ethertype = ether.ETH_TYPE_SLOW
        layers.append(slow.lacp(
            version=1,
            actor_system=pkt['actor_system'],
            actor_port=1,
            partner_system=pkt['partner_system'],
            partner_port=1,
            actor_key=1,
            partner_key=1,
            actor_system_priority=65535,
            partner_system_priority=1,
            actor_port_priority=255,
            partner_port_priority=255,
            actor_state_defaulted=0,
            partner_state_defaulted=0,
            actor_state_expired=0,
            partner_state_expired=0,
            actor_state_timeout=1,
            partner_state_timeout=1,
            actor_state_collecting=1,
            partner_state_collecting=1,
            actor_state_distributing=1,
            partner_state_distributing=1,
            actor_state_aggregation=1,
            partner_state_aggregation=1,
            actor_state_synchronization=1,
            partner_state_synchronization=1,
            actor_state_activity=0,
            partner_state_activity=0))
    elif 'chassis_id' in pkt and 'port_id' in pkt:
        ethertype = ether.ETH_TYPE_LLDP
        # LLDP beacons are built (and returned) by valve_packet directly.
        return valve_packet.lldp_beacon(
            pkt['eth_src'], pkt['chassis_id'], str(pkt['port_id']), 1,
            org_tlvs=pkt.get('org_tlvs', None),
            system_name=pkt.get('system_name', None))
    assert ethertype is not None, pkt
    if 'vid' in pkt:
        tpid = ether.ETH_TYPE_8021Q
        layers.append(vlan.vlan(vid=pkt['vid'], ethertype=ethertype))
    else:
        tpid = ethertype
    eth = ethernet.ethernet(
        dst=pkt['eth_dst'],
        src=pkt['eth_src'],
        ethertype=tpid)
    layers.append(eth)
    result = serialize(layers)
    return result
class ValveTestBases:
"""Insulate test base classes from unittest so we can reuse base clases."""
    class ValveTestSmall(unittest.TestCase): # pytype: disable=module-attr
        """Base class for all Valve unit tests."""
        # DP under test and its datapath ID.
        DP = 's1'
        DP_ID = 1
        # Size of the fake DP: port count and OF table count.
        NUM_PORTS = 5
        NUM_TABLES = 9
        # Well-known host MACs used by the traffic helpers below.
        P1_V100_MAC = '00:00:00:01:00:01'
        P2_V200_MAC = '00:00:00:02:00:02'
        P3_V200_MAC = '00:00:00:02:00:03'
        P1_V300_MAC = '00:00:00:03:00:01'
        UNKNOWN_MAC = '00:00:00:04:00:04'
        # VIDs with the OF "VLAN present" bit set.
        V100 = 0x100|ofp.OFPVID_PRESENT
        V200 = 0x200|ofp.OFPVID_PRESENT
        V300 = 0x300|ofp.OFPVID_PRESENT
        LOGNAME = 'faucet'
        # Per-DP record of the last flows "sent" to hardware by the fake.
        last_flows_to_dp = {}
        # Instances populated by setup_valve().
        valve = None
        valves_manager = None
        metrics = None
        bgp = None
        table = None
        logger = None
        tmpdir = None
        faucet_event_sock = None
        registry = None
        sock = None
        notifier = None
        config_file = None
        # Fixed payload for ICMP echo tests.
        _icmp_payload = bytes('A'*8, encoding='UTF-8') # pytype: disable=wrong-keyword-args
        def setup_valve(self, config):
            """Set up test DP with config.

            Creates a temp dir, fake OF table, logger, metrics registry,
            event notifier, BGP/dot1x helpers and a ValvesManager, writes
            the config, connects to the event socket and brings up the DP.
            """
            self.tmpdir = tempfile.mkdtemp()
            self.config_file = os.path.join(self.tmpdir, 'valve_unit.yaml')
            self.faucet_event_sock = os.path.join(self.tmpdir, 'event.sock')
            self.table = FakeOFTable(self.NUM_TABLES)
            logfile = os.path.join(self.tmpdir, 'faucet.log')
            self.logger = valve_util.get_logger(self.LOGNAME, logfile, logging.DEBUG, 0)
            self.registry = CollectorRegistry()
            # TODO: verify Prometheus variables
            self.metrics = faucet_metrics.FaucetMetrics(reg=self.registry) # pylint: disable=unexpected-keyword-arg
            # TODO: verify events
            self.notifier = faucet_experimental_event.FaucetExperimentalEventNotifier(
                self.faucet_event_sock, self.metrics, self.logger)
            self.bgp = faucet_bgp.FaucetBgp(self.logger, self.metrics, self.send_flows_to_dp_by_id)
            self.dot1x = faucet_dot1x.FaucetDot1x(
                self.logger, self.metrics, self.send_flows_to_dp_by_id)
            self.valves_manager = valves_manager.ValvesManager(
                self.LOGNAME, self.logger, self.metrics, self.notifier,
                self.bgp, self.dot1x, self.send_flows_to_dp_by_id)
            self.last_flows_to_dp[self.DP_ID] = []
            self.notifier.start()
            # update_config() also applies the initial flows to the fake table.
            self.update_config(config)
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.faucet_event_sock)
            self.connect_dp()
        def teardown_valve(self):
            """Tear down test DP: stop BGP, close logs/sockets, remove tmpdir."""
            self.bgp.shutdown_bgp_speakers()
            valve_util.close_logger(self.logger)
            for valve in list(self.valves_manager.valves.values()):
                valve.close_logs()
            self.sock.close()
            shutil.rmtree(self.tmpdir)
        def tearDown(self):
            """unittest hook: release all resources created by setup_valve()."""
            self.teardown_valve()
def get_prom(self, var, labels=None):
"""Return a Prometheus variable value."""
if labels is None:
labels = {}
labels.update({
'dp_name': self.DP,
'dp_id': '0x%x' % self.DP_ID})
return self.registry.get_sample_value(var, labels)
def prom_inc(self, func, var, labels=None):
"""Check Prometheus variable increments by 1 after calling a function."""
before = self.get_prom(var, labels)
func()
self.assertTrue(before + 1, self.get_prom(var, labels))
        def send_flows_to_dp_by_id(self, valve, flows):
            """Callback for ValvesManager to simulate sending flows to DP.

            NOTE(review): the ``valve`` argument is immediately replaced by
            the valve for self.DP_ID, so flows are always recorded against
            that DP regardless of which valve invoked the callback.
            """
            valve = self.valves_manager.valves[self.DP_ID]
            prepared_flows = valve.prepare_send_flows(flows)
            self.last_flows_to_dp[valve.dp.dp_id] = prepared_flows
        def update_config(self, config):
            """Update FAUCET config with config as text.

            Writes the config file, verifies the config watcher notices the
            change (when overwriting an existing file), triggers a reload
            and applies any resulting flows to the fake OF table.
            """
            self.assertFalse(self.valves_manager.config_watcher.files_changed())
            existing_config = os.path.exists(self.config_file)
            with open(self.config_file, 'w') as config_file:
                config_file.write(config)
            if existing_config:
                self.assertTrue(self.valves_manager.config_watcher.files_changed())
            self.last_flows_to_dp[self.DP_ID] = []
            self.valves_manager.request_reload_configs(time.time(), self.config_file)
            self.valve = self.valves_manager.valves[self.DP_ID]
            # Apply flows generated by the reload, if any were recorded.
            if self.DP_ID in self.last_flows_to_dp:
                reload_ofmsgs = self.last_flows_to_dp[self.DP_ID]
                self.table.apply_ofmsgs(reload_ofmsgs)
def connect_dp(self):
"""Call DP connect and set all ports to up."""
discovered_up_ports = [port_no for port_no in range(1, self.NUM_PORTS + 1)]
self.table.apply_ofmsgs(self.valve.datapath_connect(time.time(), discovered_up_ports))
for port_no in discovered_up_ports:
self.set_port_up(port_no)
self.assertTrue(self.valve.dp.to_conf())
        def set_port_down(self, port_no):
            """Set port status of port to down.

            Delivers an OFPPR_DELETE/link-down status event to the valve and
            applies the resulting flows to the fake OF table.
            """
            self.table.apply_ofmsgs(self.valve.port_status_handler(
                port_no, ofp.OFPPR_DELETE, ofp.OFPPS_LINK_DOWN))
        def set_port_up(self, port_no):
            """Set port status of port to up.

            Delivers an OFPPR_ADD status event (state 0 == link up) to the
            valve and applies the resulting flows to the fake OF table.
            """
            self.table.apply_ofmsgs(self.valve.port_status_handler(
                port_no, ofp.OFPPR_ADD, 0))
        def flap_port(self, port_no):
            """Flap port status (down, then up) on a port."""
            self.set_port_down(port_no)
            self.set_port_up(port_no)
@staticmethod
def packet_outs_from_flows(flows):
"""Return flows that are packetout actions."""
return [flow for flow in flows if isinstance(flow, valve_of.parser.OFPPacketOut)]
        def learn_hosts(self):
            """Learn some hosts.

            Injects traffic twice so both the initial learn and the cached
            (re-learn) path are exercised: a host on port 1/VLAN 100, a
            reply toward it, and a host pair on ports 2 and 3 of VLAN 200.
            """
            # TODO: verify learn caching.
            for _ in range(2):
                self.rcv_packet(1, 0x100, {
                    'eth_src': self.P1_V100_MAC,
                    'eth_dst': self.UNKNOWN_MAC,
                    'ipv4_src': '10.0.0.1',
                    'ipv4_dst': '10.0.0.2'})
                # TODO: verify host learning banned
                self.rcv_packet(1, 0x100, {
                    'eth_src': self.UNKNOWN_MAC,
                    'eth_dst': self.P1_V100_MAC,
                    'ipv4_src': '10.0.0.2',
                    'ipv4_dst': '10.0.0.1'})
                self.rcv_packet(2, 0x200, {
                    'eth_src': self.P2_V200_MAC,
                    'eth_dst': self.P3_V200_MAC,
                    'ipv4_src': '10.0.0.2',
                    'ipv4_dst': '10.0.0.3',
                    'vid': 0x200})
                self.rcv_packet(3, 0x200, {
                    'eth_src': self.P3_V200_MAC,
                    'eth_dst': self.P2_V200_MAC,
                    'ipv4_src': '10.0.0.3',
                    'ipv4_dst': '10.0.0.4',
                    'vid': 0x200})
        def verify_expiry(self):
            """Verify FIB resolution attempts expire.

            Advances simulated time past the retry window repeatedly while
            driving state expiry and gateway resolution.
            """
            now = time.time()
            for _ in range(self.valve.dp.max_host_fib_retry_count + 1):
                now += (self.valve.dp.timeout * 2)
                self.valve.state_expire(now)
                self.valve.resolve_gateways(now)
            # TODO: verify state expired
def verify_flooding(self, matches):
    """Verify flooding for a packet, depending on the DP implementation.

    Args:
        matches (iterable of dict): OF match fields (must include
            'in_port') for packets whose flood behavior is checked.
    """
    combinatorial_port_flood = self.valve.dp.combinatorial_port_flood
    # Group-based flooding never installs combinatorial per-port rules.
    if self.valve.dp.group_table:
        combinatorial_port_flood = False

    def _verify_flood_to_port(match, port, valve_vlan, port_number=None):
        # A tagged member port must receive the packet with the tag present.
        if valve_vlan.port_is_tagged(port):
            vid = valve_vlan.vid|ofp.OFPVID_PRESENT
        else:
            vid = 0
        if port_number is None:
            port_number = port.number
        return self.table.is_output(match, port=port_number, vid=vid)

    for match in matches:
        in_port_number = match['in_port']
        in_port = self.valve.dp.ports[in_port_number]
        # BUGFIX: was `... is not 0` — identity comparison on an int result
        # (SyntaxWarning on Python 3.8+, implementation-dependent); use !=.
        if ('vlan_vid' in match and
                match['vlan_vid'] & ofp.OFPVID_PRESENT != 0):
            valve_vlan = self.valve.dp.vlans[match['vlan_vid'] & ~ofp.OFPVID_PRESENT]
        else:
            valve_vlan = in_port.native_vlan
        all_ports = set(
            [port for port in self.valve.dp.ports.values() if port.running()])
        # BUGFIX: was `if port.running` — a bound method is always truthy,
        # so non-running VLAN ports were wrongly excluded here; call it,
        # consistent with the all_ports comprehension above.
        remaining_ports = all_ports - set(
            [port for port in valve_vlan.get_ports() if port.running()])
        hairpin_output = _verify_flood_to_port(
            match, in_port, valve_vlan, ofp.OFPP_IN_PORT)
        self.assertEqual(
            in_port.hairpin, hairpin_output,
            msg='hairpin flooding incorrect (expected %s got %s)' % (
                in_port.hairpin, hairpin_output))
        # Packet must be flooded to all ports on the VLAN.
        if not self.valve.dp.stack or 'priority' in self.valve.dp.stack:
            for port in valve_vlan.get_ports():
                output = _verify_flood_to_port(match, port, valve_vlan)
                if port == in_port:
                    # Flooding back to in_port is only correct when
                    # combinatorial flooding is disabled.
                    self.assertNotEqual(
                        combinatorial_port_flood, output,
                        msg=('flooding to in_port (%s) not '
                             'compatible with flood mode (%s)') % (
                                 output, combinatorial_port_flood))
                    continue
                self.assertTrue(
                    output,
                    msg=('%s with unknown eth_dst not flooded'
                         ' on VLAN %u to port %u' % (
                             match, valve_vlan.vid, port.number)))
        # Packet must not be flooded to ports not on the VLAN.
        for port in remaining_ports:
            if port.stack:
                self.assertTrue(
                    self.table.is_output(match, port=port.number),
                    msg=('Uknown eth_dst not flooded to stack port %s' % port))
            elif not port.mirror:
                self.assertFalse(
                    self.table.is_output(match, port=port.number),
                    msg=('Unknown eth_dst flooded to non-VLAN/stack/mirror %s' % port))
def rcv_packet(self, port, vid, match):
    """Simulate control plane receiving a packet on a port/VID.

    Args:
        port (int): datapath port the packet was received on.
        vid (int): VLAN VID the fake switch should have pushed.
        match (dict): packet field values used to build the test packet.
    Returns:
        list: ofmsgs the valve generated in response to the packet-in.
    """
    pkt = build_pkt(match)
    vlan_pkt = pkt
    # TODO: VLAN packet submitted to packet in always has VID
    # Fake OF switch implementation should do this by applying actions.
    # BUGFIX: was `vid not in match`, which tested the *integer* VID as a
    # dict key (keys are strings, so it was always true and the packet was
    # always rebuilt); the intent is to add 'vid' only when missing.
    if vid and 'vid' not in match:
        vlan_match = match
        vlan_match['vid'] = vid
        vlan_pkt = build_pkt(match)
    msg = namedtuple(
        'null_msg',
        ('match', 'in_port', 'data', 'total_len', 'cookie', 'reason'))(
            {'in_port': port}, port, vlan_pkt.data, len(vlan_pkt.data),
            self.valve.dp.cookie, valve_of.ofp.OFPR_ACTION)
    self.last_flows_to_dp[self.DP_ID] = []
    now = time.time()
    # Deliver the packet-in while asserting the packet-in counter increments.
    self.prom_inc(
        partial(self.valves_manager.valve_packet_in, now, self.valve, msg),
        'of_packet_ins')
    rcv_packet_ofmsgs = self.last_flows_to_dp[self.DP_ID]
    self.table.apply_ofmsgs(rcv_packet_ofmsgs)
    # Run periodic valve services so timer-driven state also advances.
    for valve_service in (
            'resolve_gateways', 'advertise',
            'send_lldp_beacons', 'state_expire'):
        ofmsgs = self.valves_manager.valve_flow_services(
            now, valve_service)
        if ofmsgs:
            self.table.apply_ofmsgs(ofmsgs)
    self.valves_manager.update_metrics(now)
    return rcv_packet_ofmsgs
class ValveTestBig(ValveTestSmall):
    """Test basic switching/L2/L3 functions."""

    def setUp(self):
        # Build a valve from the module-level CONFIG for every test.
        self.setup_valve(CONFIG)

    def test_get_config_dict(self):
        """Test API call for DP config."""
        # TODO: test actual config contents.
        self.assertTrue(self.valve.get_config_dict())
        self.assertTrue(self.valve.dp.get_tables())

    def test_notifier_socket_path(self):
        """Test notifier socket path checker."""
        new_path = os.path.join(self.tmpdir, 'new_path/new_socket')
        self.assertEqual(self.notifier.check_path(new_path), new_path)
        # A pre-existing (stale) socket file must also be accepted.
        stale_socket = os.path.join(self.tmpdir, 'stale_socket')
        with open(stale_socket, 'w') as stale_socket_file:
            stale_socket_file.write('')
        self.assertEqual(self.notifier.check_path(stale_socket), stale_socket)

    def test_disconnect(self):
        """Test disconnection of DP from controller."""
        # TODO: verify DP state change.
        self.valve.datapath_disconnect()

    def test_oferror(self):
        """Test OFError handler."""
        datapath = None
        msg = valve_of.parser.OFPFlowMod(datapath=datapath)
        msg.xid = 123
        # Seed recent_ofmsgs so the error can be correlated with its cause.
        self.valve.recent_ofmsgs.append(msg)
        test_error = valve_of.parser.OFPErrorMsg(datapath=datapath, msg=msg)
        self.valve.oferror(test_error)

    def test_switch_features(self):
        """Test switch features handler."""
        self.assertTrue(
            isinstance(self.valve, TfmValve),
            msg=type(self.valve))
        features_flows = self.valve.switch_features(None)
        tfm_flows = [
            flow for flow in features_flows if isinstance(
                flow, valve_of.parser.OFPTableFeaturesStatsRequest)]
        # TODO: verify TFM content.
        self.assertTrue(tfm_flows)

    def test_pkt_meta(self):
        """Test bad fields in OFPacketIn."""
        # Each step adds one more valid field; parsing must keep failing
        # until the message is fully well-formed.
        msg = parser.OFPPacketIn(datapath=None)
        self.assertEqual(None, self.valve.parse_pkt_meta(msg))
        msg.cookie = self.valve.dp.cookie
        self.assertEqual(None, self.valve.parse_pkt_meta(msg))
        msg.reason = valve_of.ofp.OFPR_ACTION
        self.assertEqual(None, self.valve.parse_pkt_meta(msg))
        msg.match = parser.OFPMatch(in_port=1)
        self.assertEqual(None, self.valve.parse_pkt_meta(msg))
        msg.data = b'1234'
        self.assertEqual(None, self.valve.parse_pkt_meta(msg))

    def test_loop_protect(self):
        """Learn loop protection."""
        # Same source MAC seen on two ports - loop protect must engage.
        for _ in range(2):
            self.rcv_packet(1, 0x100, {
                'eth_src': self.P1_V100_MAC,
                'eth_dst': self.UNKNOWN_MAC,
                'ipv4_src': '10.0.0.1',
                'ipv4_dst': '10.0.0.2'})
            self.rcv_packet(2, 0x100, {
                'eth_src': self.P1_V100_MAC,
                'eth_dst': self.UNKNOWN_MAC,
                'ipv4_src': '10.0.0.1',
                'ipv4_dst': '10.0.0.2',
                'vid': 0x100})

    def test_lldp(self):
        """Test LLDP reception."""
        # LLDP from a non-FAUCET chassis should produce no flows.
        self.assertFalse(self.rcv_packet(1, 0, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': lldp.LLDP_MAC_NEAREST_BRIDGE,
            'chassis_id': self.P1_V100_MAC,
            'port_id': 1}))

    def test_arp_for_controller(self):
        """ARP request for controller VIP."""
        arp_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': mac.BROADCAST_STR,
            'arp_source_ip': '10.0.0.1',
            'arp_target_ip': '10.0.0.254'})
        # TODO: check ARP reply is valid
        self.assertTrue(self.packet_outs_from_flows(arp_replies))

    def test_arp_reply_from_host(self):
        """ARP reply for host."""
        arp_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': FAUCET_MAC,
            'arp_source_ip': '10.0.0.1',
            'arp_target_ip': '10.0.0.254'})
        # TODO: check ARP reply is valid
        # Flows must be generated but no packet out (nothing to reply to).
        self.assertTrue(arp_replies)
        self.assertFalse(self.packet_outs_from_flows(arp_replies))

    def test_nd_for_controller(self):
        """IPv6 ND for controller VIP."""
        dst_ip = ipaddress.IPv6Address('fc00::1:254')
        nd_mac = valve_packet.ipv6_link_eth_mcast(dst_ip)
        ip_gw_mcast = valve_packet.ipv6_solicited_node_from_ucast(dst_ip)
        nd_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': nd_mac,
            'vid': 0x200,
            'ipv6_src': 'fc00::1:1',
            'ipv6_dst': str(ip_gw_mcast),
            'neighbor_solicit_ip': str(dst_ip)})
        # TODO: check ND reply is valid
        self.assertTrue(self.packet_outs_from_flows(nd_replies))

    def test_nd_from_host(self):
        """IPv6 NA from host."""
        na_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': FAUCET_MAC,
            'vid': 0x200,
            'ipv6_src': 'fc00::1:1',
            'ipv6_dst': 'fc00::1:254',
            'neighbor_advert_ip': 'fc00::1:1'})
        # TODO: check NA response flows are valid
        self.assertTrue(na_replies)
        self.assertFalse(self.packet_outs_from_flows(na_replies))

    def test_ra_for_controller(self):
        """IPv6 RA for controller."""
        router_solicit_ip = 'ff02::2'
        ra_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': '33:33:00:00:00:02',
            'vid': 0x200,
            'ipv6_src': 'fe80::1:1',
            'ipv6_dst': router_solicit_ip,
            'router_solicit_ip': router_solicit_ip})
        # TODO: check RA is valid
        self.assertTrue(self.packet_outs_from_flows(ra_replies))

    def test_icmp_ping_controller(self):
        """IPv4 ping controller VIP."""
        echo_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': FAUCET_MAC,
            'vid': 0x100,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.254',
            'echo_request_data': bytes(
                'A'*8, encoding='UTF-8')})  # pytype: disable=wrong-keyword-args
        # TODO: check ping response
        self.assertTrue(self.packet_outs_from_flows(echo_replies))

    def test_add_del_route(self):
        """IPv4 add/del of a route."""
        # Resolve the gateway first so the route has a usable nexthop.
        arp_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': mac.BROADCAST_STR,
            'arp_source_ip': '10.0.0.1',
            'arp_target_ip': '10.0.0.254'})
        # TODO: check ARP reply is valid
        self.assertTrue(self.packet_outs_from_flows(arp_replies))
        valve_vlan = self.valve.dp.vlans[0x100]
        ip_dst = ipaddress.IPv4Network('10.100.100.0/24')
        ip_gw = ipaddress.IPv4Address('10.0.0.1')
        route_add_replies = self.valve.add_route(
            valve_vlan, ip_gw, ip_dst)
        # TODO: check add flows.
        self.assertTrue(route_add_replies)
        route_del_replies = self.valve.del_route(
            valve_vlan, ip_dst)
        # TODO: check del flows.
        self.assertTrue(route_del_replies)

    def test_host_ipv4_fib_route(self):
        """Test learning a FIB rule for an IPv4 host."""
        fib_route_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vid': 0x100,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.4',
            'echo_request_data': bytes(
                'A'*8, encoding='UTF-8')})  # pytype: disable=wrong-keyword-args
        # TODO: verify learning rule contents
        # We want to know this host was learned we did not get packet outs.
        self.assertTrue(fib_route_replies)
        self.assertFalse(self.packet_outs_from_flows(fib_route_replies))
        self.verify_expiry()

    def test_host_ipv6_fib_route(self):
        """Test learning a FIB rule for an IPv6 host."""
        fib_route_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vid': 0x200,
            'ipv6_src': 'fc00::1:2',
            'ipv6_dst': 'fc00::1:4',
            'echo_request_data': self._icmp_payload})
        # TODO: verify learning rule contents
        # We want to know this host was learned we did not get packet outs.
        self.assertTrue(fib_route_replies)
        self.assertFalse(self.packet_outs_from_flows(fib_route_replies))
        self.verify_expiry()

    def test_icmp_ping_unknown_neighbor(self):
        """IPv4 ping unknown host on same subnet, causing proactive learning."""
        echo_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': FAUCET_MAC,
            'vid': 0x100,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.99',
            'echo_request_data': self._icmp_payload})
        # TODO: check proactive neighbor resolution
        self.assertTrue(self.packet_outs_from_flows(echo_replies))

    def test_icmp_ping6_unknown_neighbor(self):
        """IPv4 ping unknown host on same subnet, causing proactive learning."""
        echo_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': FAUCET_MAC,
            'vid': 0x200,
            'ipv6_src': 'fc00::1:2',
            'ipv6_dst': 'fc00::1:4',
            'echo_request_data': self._icmp_payload})
        # TODO: check proactive neighbor resolution
        self.assertTrue(self.packet_outs_from_flows(echo_replies))

    def test_icmpv6_ping_controller(self):
        """IPv6 ping controller VIP."""
        echo_replies = self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': FAUCET_MAC,
            'vid': 0x200,
            'ipv6_src': 'fc00::1:1',
            'ipv6_dst': 'fc00::1:254',
            'echo_request_data': self._icmp_payload})
        # TODO: check ping response
        self.assertTrue(self.packet_outs_from_flows(echo_replies))

    def test_invalid_vlan(self):
        """Test that packets with incorrect vlan tagging get dropped."""
        # Unconfigured VID, tagged packet on a native-only port,
        # and untagged packet on a tagged-only port.
        matches = [
            {'in_port': 1, 'vlan_vid': 18|ofp.OFPVID_PRESENT},
            {'in_port': 1, 'vlan_vid': self.V100},
            {'in_port': 3, 'vlan_vid': 0}]
        for match in matches:
            self.assertFalse(
                self.table.is_output(match),
                msg='Packets with incorrect vlan tags are output')

    def test_unknown_eth_src(self):
        """Test that packets from unknown macs are sent to controller.

        Untagged packets should have VLAN tags pushed before they are sent to
        the controller.
        """
        matches = [
            {'in_port': 1, 'vlan_vid': 0},
            {'in_port': 1, 'vlan_vid': 0, 'eth_src' : self.UNKNOWN_MAC},
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src' : self.P2_V200_MAC
            },
            {'in_port': 2, 'vlan_vid': 0, 'eth_dst' : self.UNKNOWN_MAC},
            {'in_port': 2, 'vlan_vid': 0},
            {
                'in_port': 2,
                'vlan_vid': self.V100,
                'eth_src' : self.P2_V200_MAC
            },
            {
                'in_port': 2,
                'vlan_vid': self.V100,
                'eth_src' : self.UNKNOWN_MAC,
                'eth_dst' : self.P1_V100_MAC
            },
        ]
        for match in matches:
            if match['vlan_vid'] != 0:
                vid = match['vlan_vid']
            else:
                # Untagged: the native VLAN tag must have been pushed.
                vid = self.valve.dp.get_native_vlan(match['in_port']).vid
                vid = vid|ofp.OFPVID_PRESENT
            self.assertTrue(
                self.table.is_output(match, ofp.OFPP_CONTROLLER, vid=vid),
                msg="Packet with unknown ethernet src not sent to controller: "
                "{0}".format(match))

    def test_unknown_eth_dst_rule(self):
        """Test that packets with unkown eth dst addrs get flooded correctly.

        They must be output to each port on the associated vlan, with the
        correct vlan tagging. And they must not be forwarded to a port not
        on the associated vlan
        """
        self.learn_hosts()
        matches = [
            {
                'in_port': 3,
                'vlan_vid': self.V100,
            },
            {
                'in_port': 2,
                'vlan_vid': 0,
                'eth_dst': self.P1_V100_MAC
            },
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src': self.P1_V100_MAC
            },
            {
                'in_port': 3,
                'vlan_vid': self.V200,
                'eth_src': self.P2_V200_MAC,
            }
        ]
        self.verify_flooding(matches)

    def test_known_eth_src_rule(self):
        """Test that packets with known eth src addrs are not sent to controller."""
        self.learn_hosts()
        matches = [
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src': self.P1_V100_MAC
            },
            {
                'in_port': 2,
                'vlan_vid': self.V200,
                'eth_src': self.P2_V200_MAC
            },
            {
                'in_port': 3,
                'vlan_vid': self.V200,
                'eth_src': self.P3_V200_MAC,
                'eth_dst': self.P2_V200_MAC
            }
        ]
        for match in matches:
            self.assertFalse(
                self.table.is_output(match, port=ofp.OFPP_CONTROLLER),
                msg="Packet ({0}) output to controller when eth_src address"
                " is known".format(match))

    def test_known_eth_src_deletion(self):
        """Verify that when a mac changes port the old rules get deleted.

        If a mac address is seen on one port, then seen on a different port on
        the same vlan the rules associated with that mac address on previous
        port need to be deleted. IE packets with that mac address arriving on
        the old port should be output to the controller."""
        self.rcv_packet(3, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vlan_vid': 0x200,
            'ipv4_src': '10.0.0.3',
            'ipv4_dst': '10.0.0.3'})
        match = {'in_port': 2, 'vlan_vid': 0, 'eth_src': self.P2_V200_MAC}
        self.assertTrue(
            self.table.is_output(match, port=ofp.OFPP_CONTROLLER),
            msg='eth src rule not deleted when mac seen on another port')

    def test_known_eth_dst_rule(self):
        """Test that packets with known eth dst addrs are output correctly.

        Output to the correct port with the correct vlan tagging."""
        self.learn_hosts()
        match_results = [
            ({
                'in_port': 2,
                'vlan_vid': self.V100,
                'eth_dst': self.P1_V100_MAC
            }, {
                'out_port': 1,
                'vlan_vid': 0
            }),
            ({
                'in_port': 3,
                'vlan_vid': self.V200,
                'eth_dst': self.P2_V200_MAC,
                'eth_src': self.P3_V200_MAC
            }, {
                'out_port': 2,
                'vlan_vid': 0,
            })
        ]
        for match, result in match_results:
            self.assertTrue(
                self.table.is_output(
                    match, result['out_port'], vid=result['vlan_vid']),
                msg='packet not output to port correctly when eth dst is known')
            # It must go to exactly one port - verify all the others.
            incorrect_ports = set(range(1, self.NUM_PORTS + 1))
            incorrect_ports.remove(result['out_port'])
            for port in incorrect_ports:
                self.assertFalse(
                    self.table.is_output(match, port=port),
                    msg=('packet %s output to incorrect port %u when eth_dst '
                         'is known' % (match, port)))
        self.verify_expiry()

    def test_mac_learning_vlan_separation(self):
        """Test that when a mac is seen on a second vlan the original vlan
        rules are unaffected."""
        self.learn_hosts()
        self.rcv_packet(2, 0x200, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vlan_vid': 0x200,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.3'})
        # check eth_src rule
        match1 = {'in_port': 1, 'vlan_vid': 0, 'eth_src': self.P1_V100_MAC}
        self.assertFalse(
            self.table.is_output(match1, ofp.OFPP_CONTROLLER),
            msg=('mac address being seen on a vlan affects eth_src rule on '
                 'other vlan'))
        # check eth_dst rule
        match2 = {'in_port': 3, 'vlan_vid': self.V100, 'eth_dst': self.P1_V100_MAC}
        self.assertTrue(
            self.table.is_output(match2, port=1, vid=0),
            msg=('mac address being seen on a vlan affects eth_dst rule on '
                 'other vlan'))
        for port in (2, 4):
            self.assertFalse(
                self.table.is_output(match2, port=port),
                msg=('mac address being seen on a vlan affects eth_dst rule on '
                     'other vlan'))

    def test_known_eth_dst_rule_deletion(self):
        """Test that eth_dst rules are deleted when the mac is learned on
        another port.

        This should only occur when the mac is seen on the same vlan."""
        self.rcv_packet(2, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.3'})
        match = {'in_port': 3, 'vlan_vid': self.V100, 'eth_dst': self.P1_V100_MAC}
        self.assertTrue(
            self.table.is_output(match, port=2, vid=self.V100),
            msg='Packet not output correctly after mac is learnt on new port')
        self.assertFalse(
            self.table.is_output(match, port=1),
            msg='Packet output on old port after mac is learnt on new port')

    def test_port_delete_eth_dst_removal(self):
        """Test that when a port is disabled packets are correctly output. """
        match = {'in_port': 2, 'vlan_vid': self.V100, 'eth_dst': self.P1_V100_MAC}
        valve_vlan = self.valve.dp.vlans[match['vlan_vid'] & ~ofp.OFPVID_PRESENT]
        ofmsgs = self.valve.port_delete(port_num=1)
        self.table.apply_ofmsgs(ofmsgs)
        # Check packets are output to each port on vlan
        for port in valve_vlan.get_ports():
            if port.number != match['in_port'] and port.running():
                if valve_vlan.port_is_tagged(port):
                    vid = valve_vlan.vid|ofp.OFPVID_PRESENT
                else:
                    vid = 0
                self.assertTrue(
                    self.table.is_output(match, port=port.number, vid=vid),
                    msg=('packet %s with eth dst learnt on deleted port not output '
                         'correctly on vlan %u to port %u' % (
                             match, valve_vlan.vid, port.number)))

    def test_port_down_eth_src_removal(self):
        """Test that when a port goes down and comes back up learnt mac
        addresses are deleted."""
        match = {'in_port': 1, 'vlan_vid': 0, 'eth_src': self.P1_V100_MAC}
        self.flap_port(1)
        self.assertTrue(
            self.table.is_output(match, port=ofp.OFPP_CONTROLLER),
            msg='Packet not output to controller after port bounce')

    def test_port_add_input(self):
        """Test that when a port is enabled packets are input correctly."""
        match = {'in_port': 1, 'vlan_vid': 0}
        self.table.apply_ofmsgs(
            self.valve.port_delete(port_num=1))
        self.assertFalse(
            self.table.is_output(match, port=2, vid=self.V100),
            msg='Packet output after port delete')
        self.table.apply_ofmsgs(
            self.valve.port_add(port_num=1))
        self.assertTrue(
            self.table.is_output(match, port=2, vid=self.V100),
            msg='Packet not output after port add')

    def test_port_acl_deny(self):
        """Test that port ACL denies forwarding."""
        # NOTE(review): YAML indentation below reconstructed from standard
        # FAUCET config layout - original whitespace was lost; verify.
        acl_config = """
dps:
    s1:
        hardware: 'Open vSwitch'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
                acl_in: drop_non_ospf_ipv4
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                native_vlan: v300
vlans:
    v100:
        vid: 0x100
    v200:
        vid: 0x200
    v300:
        vid: 0x300
acls:
    drop_non_ospf_ipv4:
        - rule:
            cookie: 0x1234
            description: 'a description'
            nw_dst: '224.0.0.5'
            dl_type: 0x800
            actions:
                meter: testmeter
                allow: 1
        - rule:
            dl_type: 0x800
            actions:
                allow: 0
meters:
    testmeter:
        meter_id: 99
        entry:
            flags: "KBPS"
            bands:
                [
                    {
                        type: "DROP",
                        rate: 1
                    }
                ]
""" % DP1_CONFIG
        drop_match = {
            'in_port': 2,
            'vlan_vid': 0,
            'eth_type': 0x800,
            'ipv4_dst': '192.0.2.1'}
        accept_match = {
            'in_port': 2,
            'vlan_vid': 0,
            'eth_type': 0x800,
            'ipv4_dst': '224.0.0.5'}
        # base case
        for match in (drop_match, accept_match):
            self.assertTrue(
                self.table.is_output(match, port=3, vid=self.V200),
                msg='Packet not output before adding ACL')
        self.update_config(acl_config)
        self.assertFalse(
            self.table.is_output(drop_match),
            msg='packet not blocked by ACL')
        self.assertTrue(
            self.table.is_output(accept_match, port=3, vid=self.V200),
            msg='packet not allowed by ACL')

    def test_lldp_beacon(self):
        """Test LLDP beacon service."""
        # TODO: verify LLDP packet content.
        self.assertTrue(self.valve.send_lldp_beacons(time.time()))

    def test_unknown_port(self):
        """Test port status change for unknown port handled."""
        self.set_port_up(99)

    def test_port_modify(self):
        """Set port status modify."""
        for port_status in (0, 1):
            self.table.apply_ofmsgs(self.valve.port_status_handler(
                1, ofp.OFPPR_MODIFY, port_status))

    def test_unknown_port_status(self):
        """Test unknown port status message."""
        known_messages = set([ofp.OFPPR_MODIFY, ofp.OFPPR_ADD, ofp.OFPPR_DELETE])
        unknown_messages = list(set(range(0, len(known_messages) + 1)) - known_messages)
        self.assertTrue(unknown_messages)
        self.assertFalse(self.valve.port_status_handler(
            1, unknown_messages[0], 1))

    def test_move_port(self):
        """Test host moves a port."""
        self.rcv_packet(2, 0x200, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vlan_vid': 0x200,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.3'})
        self.rcv_packet(4, 0x200, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vlan_vid': 0x200,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.3'})

    def test_bgp_route_change(self):
        """Test BGP route change handler."""
        nexthop = '10.0.0.1'
        prefix = '192.168.1.1/32'
        add_event = RouteAddition(
            IPPrefix.from_string(prefix),
            IPAddress.from_string(nexthop),
            '65001',
            'IGP'
        )
        del_event = RouteRemoval(
            IPPrefix.from_string(prefix),
        )
        self.bgp._bgp_route_handler(add_event, self.DP_ID, 0x100)
        self.bgp._bgp_route_handler(del_event, self.DP_ID, 0x100)
        self.bgp._bgp_up_handler(nexthop, 65001)
        self.bgp._bgp_down_handler(nexthop, 65001)

    def test_packet_in_rate(self):
        """Test packet in rate limit triggers."""
        now = time.time()
        for _ in range(self.valve.dp.ignore_learn_ins * 2 + 1):
            if self.valve.rate_limit_packet_ins(now):
                return
        self.fail('packet in rate limit not triggered')

    def test_ofdescstats_handler(self):
        """Test OFDescStatsReply handler."""
        body = parser.OFPDescStats(
            mfr_desc=u'test_mfr_desc'.encode(),
            hw_desc=u'test_hw_desc'.encode(),
            sw_desc=u'test_sw_desc'.encode(),
            serial_num=u'99'.encode(),
            dp_desc=u'test_dp_desc'.encode())
        self.valve.ofdescstats_handler(body)
        # Handler must also tolerate non-UTF8 description bytes.
        invalid_body = parser.OFPDescStats(
            mfr_desc=b'\x80',
            hw_desc=b'test_hw_desc',
            sw_desc=b'test_sw_desc',
            serial_num=b'99',
            dp_desc=b'test_dp_desc')
        self.valve.ofdescstats_handler(invalid_body)
class ValveTestCase(ValveTestBases.ValveTestBig):
    """Run complete set of basic tests."""
    # Concrete runner for the ValveTestBig suite; no overrides needed.
    pass
class ValveChangePortCase(ValveTestBases.ValveTestSmall):
    """Test changes to config on ports."""

    # NOTE(review): YAML indentation reconstructed (original lost); verify.
    CONFIG = """
dps:
    s1:
%s
        interfaces:
            p1:
                number: 1
                native_vlan: 0x100
            p2:
                number: 2
                native_vlan: 0x200
                permanent_learn: True
""" % DP1_CONFIG

    # Same DP but with permanent_learn disabled on p2.
    LESS_CONFIG = """
dps:
    s1:
%s
        interfaces:
            p1:
                number: 1
                native_vlan: 0x100
            p2:
                number: 2
                native_vlan: 0x200
                permanent_learn: False
""" % DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def test_delete_port(self):
        """Test port can be deleted."""
        self.rcv_packet(2, 0x200, {
            'eth_src': self.P2_V200_MAC,
            'eth_dst': self.P3_V200_MAC,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.3',
            'vid': 0x200})
        self.update_config(self.LESS_CONFIG)
class ValveACLTestCase(ValveTestBases.ValveTestSmall):
    """Test ACL drop/allow and reloading."""

    def setUp(self):
        self.setup_valve(CONFIG)

    def test_vlan_acl_deny(self):
        """Test VLAN ACL denies a packet."""
        # NOTE(review): YAML indentation reconstructed (original lost); verify.
        acl_config = """
dps:
    s1:
        hardware: 'Open vSwitch'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                native_vlan: v300
vlans:
    v100:
        vid: 0x100
    v200:
        vid: 0x200
        acl_in: drop_non_ospf_ipv4
    v300:
        vid: 0x300
acls:
    drop_non_ospf_ipv4:
        - rule:
            nw_dst: '224.0.0.5'
            dl_type: 0x800
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            actions:
                allow: 0
""" % DP1_CONFIG
        drop_match = {
            'in_port': 2,
            'vlan_vid': 0,
            'eth_type': 0x800,
            'ipv4_dst': '192.0.2.1'}
        accept_match = {
            'in_port': 2,
            'vlan_vid': 0,
            'eth_type': 0x800,
            'ipv4_dst': '224.0.0.5'}
        # base case
        for match in (drop_match, accept_match):
            self.assertTrue(
                self.table.is_output(match, port=3, vid=self.V200),
                msg='Packet not output before adding ACL')
        self.update_config(acl_config)
        self.flap_port(2)
        self.assertFalse(
            self.table.is_output(drop_match),
            msg='Packet not blocked by ACL')
        self.assertTrue(
            self.table.is_output(accept_match, port=3, vid=self.V200),
            msg='Packet not allowed by ACL')
class ValveRootStackTestCase(ValveTestBases.ValveTestSmall):
    """Test stacking/forwarding."""

    # Run the tests as the stack root DP (s3).
    DP = 's3'
    DP_ID = 0x3

    def setUp(self):
        self.setup_valve(CONFIG)

    def test_stack_learn(self):
        """Test host learning on stack root."""
        self.rcv_packet(1, 0x300, {
            'eth_src': self.P1_V300_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.2'})
        self.rcv_packet(5, 0x300, {
            'eth_src': self.P1_V300_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vid': 0x300,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.2'})

    def test_stack_flood(self):
        """Test packet flooding when stacking."""
        matches = [
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src': self.P1_V300_MAC
            }]
        self.verify_flooding(matches)
class ValveEdgeStackTestCase(ValveTestBases.ValveTestSmall):
    """Test stacking/forwarding."""

    # Run the tests as a non-root (edge) stack DP (s4).
    DP = 's4'
    DP_ID = 0x4

    def setUp(self):
        self.setup_valve(CONFIG)

    def test_stack_learn(self):
        """Test host learning on non-root switch."""
        self.rcv_packet(1, 0x300, {
            'eth_src': self.P1_V300_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.2'})
        self.rcv_packet(5, 0x300, {
            'eth_src': self.P1_V300_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vid': 0x300,
            'ipv4_src': '10.0.0.1',
            'ipv4_dst': '10.0.0.2'})

    def test_stack_flood(self):
        """Test packet flooding when stacking."""
        matches = [
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src': self.P1_V300_MAC
            }]
        self.verify_flooding(matches)
class ValveStackProbeTestCase(ValveTestBases.ValveTestSmall):
    """Test stack link probing."""

    # Two stack links s1<->s2 plus an unrelated DP s3 (used to simulate
    # miscabling). NOTE(review): YAML indentation reconstructed; verify.
    CONFIG = """
dps:
    s1:
%s
        stack:
            priority: 1
        interfaces:
            1:
                stack:
                    dp: s2
                    port: 1
            2:
                stack:
                    dp: s2
                    port: 2
            3:
                native_vlan: v100
    s2:
        hardware: 'Open vSwitch'
        dp_id: 0x2
        lldp_beacon:
            send_interval: 5
            max_per_interval: 1
        interfaces:
            1:
                stack:
                    dp: s1
                    port: 1
            2:
                stack:
                    dp: s1
                    port: 2
            3:
                native_vlan: v100
    s3:
        dp_id: 0x3
        interfaces:
            1:
                native_vlan: v100
vlans:
    v100:
        vid: 100
""" % DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def rcv_lldp(self, other_dp, other_port):
        """Receive an LLDP packet"""
        # Build the FAUCET org-specific TLVs the probe state machine expects.
        tlvs = []
        tlvs.extend(valve_packet.faucet_lldp_tlvs(other_dp))
        tlvs.extend(valve_packet.faucet_lldp_stack_state_tlvs(other_dp, other_port))
        self.rcv_packet(1, 0, {
            'eth_src': FAUCET_MAC,
            'eth_dst': lldp.LLDP_MAC_NEAREST_BRIDGE,
            'port_id': other_port.number,
            'chassis_id': FAUCET_MAC,
            'system_name': other_dp.name,
            'org_tlvs': tlvs})

    def test_stack_probe(self):
        """Test probing works correctly."""
        stack_port = self.valve.dp.ports[1]
        other_dp = self.valves_manager.valves[2].dp
        other_port = other_dp.ports[1]
        # With no LLDP seen yet the link must be considered down.
        self.valve.update_stack_link_states(time.time())
        self.assertTrue(stack_port.is_stack_down())
        # Our local state must track the remote port's advertised state.
        for change_func, check_func in [
                ('stack_init', 'is_stack_init'),
                ('stack_up', 'is_stack_up'),
                ('stack_down', 'is_stack_down')]:
            getattr(other_port, change_func)()
            self.rcv_lldp(other_dp, other_port)
            self.assertTrue(getattr(stack_port, check_func)())

    def test_stack_miscabling(self):
        """Test probing stack with miscabling."""
        stack_port = self.valve.dp.ports[1]
        other_dp = self.valves_manager.valves[2].dp
        other_port = other_dp.ports[1]
        wrong_port = other_dp.ports[2]
        wrong_dp = self.valves_manager.valves[3].dp
        # Either a wrong DP or a wrong port must force the link down.
        for remote_dp, remote_port in [
                (wrong_dp, other_port),
                (other_dp, wrong_port)]:
            self.rcv_lldp(other_dp, other_port)
            self.assertTrue(stack_port.is_stack_init())
            self.rcv_lldp(remote_dp, remote_port)
            self.assertTrue(stack_port.is_stack_down())

    def test_stack_lost_lldp(self):
        """Test stacking when LLDP packets get dropped"""
        stack_port = self.valve.dp.ports[1]
        other_dp = self.valves_manager.valves[2].dp
        other_port = other_dp.ports[1]
        self.rcv_lldp(other_dp, other_port)
        self.assertTrue(stack_port.is_stack_init())
        self.valve.update_stack_link_states(time.time() + 300)  # simulate packet loss
        self.assertTrue(stack_port.is_stack_down())
class ValveGroupRoutingTestCase(ValveTestBases.ValveTestSmall):
    """Tests for datapath with group support."""

    # NOTE(review): YAML indentation reconstructed (original lost); verify.
    CONFIG = """
dps:
    s1:
        hardware: 'GenericTFM'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                output_only: True
                mirror: 4
vlans:
    v100:
        vid: 0x100
        faucet_vips: ['10.0.0.254/24']
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
            - route:
                ip_dst: 10.99.98.0/24
                ip_gw: 10.0.0.99
    v200:
        vid: 0x200
""" % GROUP_ROUTING_DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def test_host_ipv4_fib_route(self):
        """Test learning a FIB rule for an IPv4 host."""
        fib_route_replies = self.rcv_packet(1, 0x100, {
            'eth_src': self.P1_V100_MAC,
            'eth_dst': self.UNKNOWN_MAC,
            'vid': 0x100,
            'ipv4_src': '10.0.0.2',
            'ipv4_dst': '10.0.0.4',
            'echo_request_data': self._icmp_payload})
        # TODO: verify learning rule contents
        # We want to know this host was learned we did not get packet outs.
        self.assertTrue(fib_route_replies)
        self.assertFalse(self.packet_outs_from_flows(fib_route_replies))
class ValveGroupTestCase(ValveTestBases.ValveTestSmall):
    """Tests for datapath with group support."""

    # NOTE(review): YAML indentation reconstructed (original lost); verify.
    CONFIG = """
dps:
    s1:
        hardware: 'GenericTFM'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                output_only: True
                mirror: 4
vlans:
    v100:
        vid: 0x100
    v200:
        vid: 0x200
""" % GROUP_DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def test_unknown_eth_dst_rule(self):
        """Test that packets with unkown eth dst addrs get flooded correctly.

        They must be output to each port on the associated vlan, with the
        correct vlan tagging. And they must not be forwarded to a port not
        on the associated vlan
        """
        self.learn_hosts()
        matches = [
            {
                'in_port': 3,
                'vlan_vid': self.V100,
            },
            {
                'in_port': 2,
                'vlan_vid': 0,
                'eth_dst': self.P1_V100_MAC
            },
            {
                'in_port': 1,
                'vlan_vid': 0,
                'eth_src': self.P1_V100_MAC
            },
            {
                'in_port': 3,
                'vlan_vid': self.V200,
                'eth_src': self.P2_V200_MAC,
            }
        ]
        self.verify_flooding(matches)
class ValveIdleLearnTestCase(ValveTestBases.ValveTestSmall):
    """Smoke test for idle-flow based learning. This feature is not currently reliable."""

    # NOTE(review): YAML indentation reconstructed (original lost); verify.
    CONFIG = """
dps:
    s1:
        hardware: 'GenericTFM'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                output_only: True
                mirror: 4
vlans:
    v100:
        vid: 0x100
    v200:
        vid: 0x200
""" % IDLE_DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def test_known_eth_src_rule(self):
        """Test removal flow handlers."""
        self.learn_hosts()
        # Expiring an eth_dst flow is handled; expiring eth_src is not.
        self.assertTrue(
            self.valve.flow_timeout(
                time.time(),
                self.valve.dp.tables['eth_dst'].table_id,
                {'vlan_vid': self.V100, 'eth_dst': self.P1_V100_MAC}))
        self.assertFalse(
            self.valve.flow_timeout(
                time.time(),
                self.valve.dp.tables['eth_src'].table_id,
                {'vlan_vid': self.V100, 'in_port': 1, 'eth_src': self.P1_V100_MAC}))
class ValveLACPTestCase(ValveTestBases.ValveTestSmall):
    """Test LACP."""

    # p1 is a LACP member port. NOTE(review): YAML indentation
    # reconstructed (original lost); verify.
    CONFIG = """
dps:
    s1:
        hardware: 'GenericTFM'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
                lacp: 1
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                tagged_vlans: [v300]
vlans:
    v100:
        vid: 0x100
    v200:
        vid: 0x200
    v300:
        vid: 0x300
""" % DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)

    def test_lacp(self):
        """Test LACP comes up."""
        # TODO: verify LACP state
        self.rcv_packet(1, 0, {
            'actor_system': '0e:00:00:00:00:02',
            'partner_system': FAUCET_MAC,
            'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
            'eth_src': '0e:00:00:00:00:02'})
        self.learn_hosts()
        self.verify_expiry()
class ValveReloadConfigTestCase(ValveTestBases.ValveTestBig):
    """Repeats the tests after a config reload."""

    def setUp(self):
        super(ValveReloadConfigTestCase, self).setUp()
        # Bounce a port and reload the same config before every test so the
        # inherited tests run against a post-reload valve.
        self.flap_port(1)
        self.update_config(CONFIG)
class ValveMirrorTestCase(ValveTestBases.ValveTestBig):
    """Test ACL and interface mirroring."""
    # TODO: check mirror packets are present/correct

    # p5 is an output-only mirror of p4; p1 additionally applies an ACL
    # that mirrors OSPF multicast (224.0.0.5) to p5. v100/v200 carry
    # static routes, and v200 also has a BGP speaker configured.
    CONFIG = """
acls:
    mirror_ospf:
        - rule:
            nw_dst: '224.0.0.5'
            dl_type: 0x800
            actions:
                mirror: p5
                allow: 1
        - rule:
            dl_type: 0x800
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
dps:
    s1:
        hardware: 'GenericTFM'
%s
        interfaces:
            p1:
                number: 1
                native_vlan: v100
                lldp_beacon:
                    enable: True
                    system_name: "faucet"
                    port_descr: "first_port"
                acls_in: [mirror_ospf]
            p2:
                number: 2
                native_vlan: v200
                tagged_vlans: [v100]
            p3:
                number: 3
                tagged_vlans: [v100, v200]
            p4:
                number: 4
                tagged_vlans: [v200]
            p5:
                number: 5
                output_only: True
                mirror: 4
vlans:
    v100:
        vid: 0x100
        faucet_vips: ['10.0.0.254/24']
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
            - route:
                ip_dst: 10.99.98.0/24
                ip_gw: 10.0.0.99
    v200:
        vid: 0x200
        faucet_vips: ['fc00::1:254/112', 'fe80::1:254/64']
        bgp_port: 9179
        bgp_server_addresses: ['127.0.0.1']
        bgp_as: 1
        bgp_routerid: '1.1.1.1'
        bgp_neighbor_addresses: ['127.0.0.1']
        bgp_neighbor_as: 2
        bgp_connect_mode: 'passive'
        routes:
            - route:
                ip_dst: 'fc00::10:0/112'
                ip_gw: 'fc00::1:1'
            - route:
                ip_dst: 'fc00::20:0/112'
                ip_gw: 'fc00::1:99'
""" % DP1_CONFIG

    def setUp(self):
        self.setup_valve(self.CONFIG)
class RyuAppSmokeTest(unittest.TestCase):  # pytype: disable=module-attr
    """Test bare instantiation of controller classes."""

    @staticmethod
    def _fake_dp():
        # Minimal datapath stand-in: id 0 plus a no-op close() callable.
        datapath = namedtuple('datapath', ['id', 'close'])(0, lambda: None)
        return datapath

    def test_faucet(self):
        """Test FAUCET can be initialized."""
        # Point all config/log paths at /dev/null so startup is side-effect
        # free.
        os.environ['FAUCET_CONFIG'] = '/dev/null'
        os.environ['FAUCET_LOG'] = '/dev/null'
        os.environ['FAUCET_EXCEPTION_LOG'] = '/dev/null'
        ryu_app = faucet.Faucet(
            dpset={},
            faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI(),
            reg=CollectorRegistry())
        ryu_app.reload_config(None)
        self.assertFalse(ryu_app._config_files_changed())
        ryu_app.metric_update(None)
        ryu_app.get_config()
        ryu_app.get_tables(0)
        # Drive connect/disconnect with a fake datapath.
        event_dp = dpset.EventDPReconnected(dp=self._fake_dp())
        for enter in (True, False):
            event_dp.enter = enter
            ryu_app.connect_or_disconnect_handler(event_dp)
        # Feed a synthetic event through every handler; this smoke test
        # only verifies that none of them raise.
        for event_handler in (
                ryu_app.error_handler,
                ryu_app.features_handler,
                ryu_app.packet_in_handler,
                ryu_app.desc_stats_reply_handler,
                ryu_app.port_status_handler,
                ryu_app.flowremoved_handler,
                ryu_app.reconnect_handler,
                ryu_app._datapath_connect,
                ryu_app._datapath_disconnect):
            msg = namedtuple('msg', ['datapath'])(self._fake_dp())
            event = EventOFPMsgBase(msg=msg)
            event.dp = msg.datapath
            event_handler(event)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pytype: disable=module-attr
|
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jinja2
import os
import re
import subprocess
import sys
import tempfile
import time
import yaml
from osc_lib.i18n import _
from oslo_log import log as logging
from tripleo_common.image import base
from tripleo_common.image import image_uploader
from tripleo_common.utils.locks import threadinglock
# Module-level caches populated from the packaged defaults file by
# init_prepare_defaults(); they stay None until that file is loaded.
CONTAINER_IMAGE_PREPARE_PARAM_STR = None
CONTAINER_IMAGE_PREPARE_PARAM = None
CONTAINER_IMAGES_DEFAULTS = None


def init_prepare_defaults(defaults_file):
    """Load the default ContainerImagePrepare parameter from a YAML file.

    Populates the module-level CONTAINER_IMAGE_PREPARE_PARAM_STR,
    CONTAINER_IMAGE_PREPARE_PARAM and CONTAINER_IMAGES_DEFAULTS globals.

    :param defaults_file: path to a YAML file containing a
        parameter_defaults.ContainerImagePrepare entry
    """
    # Declare all globals up front rather than interleaving with the logic.
    global CONTAINER_IMAGE_PREPARE_PARAM_STR
    global CONTAINER_IMAGE_PREPARE_PARAM
    global CONTAINER_IMAGES_DEFAULTS
    with open(defaults_file) as f:
        CONTAINER_IMAGE_PREPARE_PARAM_STR = f.read()
    p = yaml.safe_load(CONTAINER_IMAGE_PREPARE_PARAM_STR)
    CONTAINER_IMAGE_PREPARE_PARAM = p[
        'parameter_defaults']['ContainerImagePrepare']
    # The first prepare entry's 'set' mapping provides the template defaults.
    CONTAINER_IMAGES_DEFAULTS = CONTAINER_IMAGE_PREPARE_PARAM[0]['set']
# Locations of the packaged container-image templates and the default
# ContainerImagePrepare parameter file.
DEFAULT_TEMPLATE_DIR = os.path.join(sys.prefix, 'share', 'tripleo-common',
                                    'container-images')
DEFAULT_TEMPLATE_FILE = os.path.join(DEFAULT_TEMPLATE_DIR,
                                     'tripleo_containers.yaml.j2')
DEFAULT_PREPARE_FILE = os.path.join(DEFAULT_TEMPLATE_DIR,
                                    'container_image_prepare_defaults.yaml')

# Load the packaged defaults at import time when they are installed.
if os.path.isfile(DEFAULT_PREPARE_FILE):
    init_prepare_defaults(DEFAULT_PREPARE_FILE)

LOG = logging.getLogger(__name__ + '.KollaImageBuilder')
def _filter_services(service_list, resource_registry):
if resource_registry:
for service in service_list.copy():
env_path = resource_registry.get(service)
if env_path == 'OS::Heat::None':
service_list.remove(service)
def get_enabled_services(environment, roles_data):
    """Build a map of role service parameters to enabled services.

    :param environment: Heat environment for deployment
    :param roles_data: Roles file data used to filter services
    :returns: dict mapping each '<RoleName>Services' parameter name to the
        list of enabled services for that role (empty list when the role
        count is zero)
    :raises ValueError: when a role's count cannot be converted to an int
    """
    # NOTE: the docstring previously claimed a set was returned; the
    # function has always returned a dict.
    enabled_services = {}
    parameter_defaults = environment.get('parameter_defaults', {})
    resource_registry = environment.get('resource_registry', {})
    for role in roles_data:
        # An explicit <Role>Count parameter wins over the role's
        # CountDefault; missing both means zero nodes.
        count = parameter_defaults.get('%sCount' % role['name'],
                                       role.get('CountDefault', 0))
        try:
            count = int(count)
        except ValueError as exc:
            # Chain the original conversion failure for easier debugging.
            raise ValueError('Unable to convert %sCount to an int: %s' %
                             (role['name'], count)) from exc
        param = '%sServices' % role['name']
        if count > 0:
            if param in parameter_defaults:
                # A user-supplied service list overrides the role defaults.
                enabled_services[param] = parameter_defaults[param]
            else:
                default_services = role.get('ServicesDefault', [])
                # Strip services explicitly disabled via OS::Heat::None.
                _filter_services(default_services, resource_registry)
                enabled_services[param] = default_services
        else:
            # Roles with no nodes contribute no services.
            enabled_services[param] = []
    return enabled_services
def build_service_filter(environment, roles_data):
    """Collect the set of services enabled across all roles.

    :param environment: Heat environment for deployment
    :param roles_data: Roles file data used to filter services
    :returns: set of resource types representing containerized services,
        or None when no roles data was supplied
    """
    if not roles_data:
        return None
    enabled_services = get_enabled_services(environment, roles_data)
    resource_registry = environment.get('resource_registry')
    filtered_services = set()
    for role in roles_data:
        services = enabled_services.get('%sServices' % role['name'], set())
        # Probably redundant with get_enabled_services, but drop any
        # service mapped to OS::Heat::None just in case.
        _filter_services(services, resource_registry)
        filtered_services.update(services)
    return filtered_services
def set_neutron_driver(pd, mapping_args):
    """Default the neutron_driver mapping value from heat parameters.

    :param pd: Parameter defaults from the environment
    :param mapping_args: Dict to set neutron_driver value on
    """
    if mapping_args.get('neutron_driver'):
        # A driver was already chosen; leave it untouched.
        return
    has_nmd = bool(pd) and 'NeutronMechanismDrivers' in pd
    if not has_nmd:
        # No mechanism drivers configured: default to OVN.
        mapping_args['neutron_driver'] = 'ovn'
        return
    mapping_args['neutron_driver'] = (
        'ovn' if 'ovn' in pd['NeutronMechanismDrivers'] else 'other')
def container_images_prepare_multi(environment, roles_data, dry_run=False,
                                   cleanup=image_uploader.CLEANUP_FULL,
                                   lock=None):
    """Perform multiple container image prepares and merge result

    Given the full heat environment and roles data, perform multiple image
    prepare operations. The data to drive the multiple prepares is taken from
    the ContainerImagePrepare parameter in the provided environment. If
    push_destination is specified, uploads will be performed during the
    preparation.

    :param environment: Heat environment for deployment
    :param roles_data: Roles file data used to filter services
    :param dry_run: when True, skip the actual image upload step
    :param cleanup: image_uploader cleanup policy for local images
    :param lock: a locking object to use when handling uploads
    :returns: dict containing merged container image parameters from all
              prepare operations
    """
    if not lock:
        lock = threadinglock.ThreadingLock()
    pd = environment.get('parameter_defaults', {})
    cip = pd.get('ContainerImagePrepare')
    # if user does not provide a ContainerImagePrepare, use the defaults.
    if not cip:
        LOG.info(_("No ContainerImagePrepare parameter defined. Using "
                   "the defaults."))
        cip = CONTAINER_IMAGE_PREPARE_PARAM
    mirrors = {}
    mirror = pd.get('DockerRegistryMirror')
    if mirror:
        mirrors['docker.io'] = mirror
    creds = pd.get('ContainerImageRegistryCredentials')
    # Truthy when extra architectures are requested (count of extras).
    multi_arch = len(pd.get('AdditionalArchitectures', []))
    env_params = {}
    service_filter = build_service_filter(
        environment, roles_data)
    for cip_entry in cip:
        mapping_args = cip_entry.get('set', {})
        set_neutron_driver(pd, mapping_args)
        push_destination = cip_entry.get('push_destination')
        # use the configured registry IP as the discovered registry
        # if it is available
        if push_destination and isinstance(push_destination, bool):
            local_registry_ip = pd.get('LocalContainerRegistry')
            if local_registry_ip:
                push_destination = '%s:8787' % local_registry_ip
        pull_source = cip_entry.get('pull_source')
        modify_role = cip_entry.get('modify_role')
        modify_vars = cip_entry.get('modify_vars')
        modify_only_with_labels = cip_entry.get('modify_only_with_labels')
        modify_only_with_source = cip_entry.get('modify_only_with_source')
        modify_append_tag = cip_entry.get('modify_append_tag',
                                          time.strftime(
                                              '-modified-%Y%m%d%H%M%S'))
        # do not use tag_from_label if a tag is specified in the set
        tag_from_label = None
        if not mapping_args.get('tag'):
            tag_from_label = cip_entry.get('tag_from_label')
        if multi_arch and 'multi_arch' in cip_entry:
            # individual entry sets multi_arch,
            # so set global multi_arch to False
            multi_arch = False
        prepare_data = container_images_prepare(
            excludes=cip_entry.get('excludes'),
            includes=cip_entry.get('includes'),
            service_filter=service_filter,
            pull_source=pull_source,
            push_destination=push_destination,
            mapping_args=mapping_args,
            output_env_file='image_params',
            output_images_file='upload_data',
            tag_from_label=tag_from_label,
            append_tag=modify_append_tag,
            modify_role=modify_role,
            modify_vars=modify_vars,
            modify_only_with_labels=modify_only_with_labels,
            modify_only_with_source=modify_only_with_source,
            mirrors=mirrors,
            registry_credentials=creds,
            multi_arch=multi_arch,
            lock=lock
        )
        env_params.update(prepare_data['image_params'])
        if not dry_run and (push_destination or pull_source or modify_role):
            # Serialize the upload data to a temp file consumed by the
            # ImageUploadManager; keep the file alive for the upload.
            with tempfile.NamedTemporaryFile(mode='w') as f:
                yaml.safe_dump({
                    'container_images': prepare_data['upload_data']
                }, f)
                uploader = image_uploader.ImageUploadManager(
                    [f.name],
                    cleanup=cleanup,
                    mirrors=mirrors,
                    registry_credentials=creds,
                    multi_arch=multi_arch,
                    lock=lock
                )
                uploader.upload()
    return env_params
def container_images_prepare_defaults():
    """Return default dict for prepare substitutions

    This can be used as the mapping_args argument to the
    container_images_prepare function to get the same result as not specifying
    any mapping_args.

    :returns: dict of default template substitutions, or None when the
        packaged defaults file was never loaded
    """
    return KollaImageBuilder.container_images_template_inputs()
def container_images_prepare(template_file=DEFAULT_TEMPLATE_FILE,
                             template_dir=DEFAULT_TEMPLATE_DIR,
                             excludes=None, includes=None, service_filter=None,
                             pull_source=None, push_destination=None,
                             mapping_args=None, output_env_file=None,
                             output_images_file=None, tag_from_label=None,
                             append_tag=None, modify_role=None,
                             modify_vars=None, modify_only_with_labels=None,
                             modify_only_with_source=None,
                             mirrors=None, registry_credentials=None,
                             multi_arch=False, lock=None):
    """Perform container image preparation

    :param template_file: path to Jinja2 file containing all image entries
    :param template_dir: path to Jinja2 files included in the main template
    :param excludes: list of image name substrings to use for exclude filter
    :param includes: list of image name substrings, at least one must match.
                     All excludes are ignored if includes is specified.
    :param service_filter: set of heat resource types for containerized
                           services to filter by. Disable by passing None.
    :param pull_source: DEPRECATED namespace for pulling during image uploads
    :param push_destination: namespace for pushing during image uploads. When
                             specified the image parameters will use this
                             namespace too.
    :param mapping_args: dict containing substitutions for template file. See
                         CONTAINER_IMAGES_DEFAULTS for expected keys.
    :param output_env_file: key to use for heat environment parameter data
    :param output_images_file: key to use for image upload data
    :param tag_from_label: string when set will trigger tag discovery on every
                           image
    :param append_tag: string to append to the tag for the destination
                              image
    :param modify_role: string of ansible role name to run during upload before
                        the push to destination
    :param modify_vars: dict of variables to pass to modify_role
    :param modify_only_with_labels: only modify the container images with the
                                    given labels
    :param modify_only_with_source: only modify the container images from a
                                    image_source in the tripleo-common service
                                    to container mapping (e.g. kolla/tripleo)
    :param mirrors: dict of registry netloc values to mirror urls
    :param registry_credentials: dict of registry netloc values to
                                 authentication credentials for that registry.
                                 The value is a single-entry dict where the
                                 username is the key and the password is the
                                 value.
    :param multi_arch: boolean whether to prepare every architecture of
                       each image
    :param lock: a locking object to use when handling uploads
    :returns: dict with entries for the supplied output_env_file or
              output_images_file
    """
    if mapping_args is None:
        mapping_args = {}
    if not lock:
        lock = threadinglock.ThreadingLock()

    def ffunc(entry):
        # Per-entry template filter; returning None drops the entry.
        imagename = entry.get('imagename', '')
        if service_filter is not None:
            # check the entry is for a service being deployed
            image_services = set(entry.get('services', []))
            if not image_services.intersection(service_filter):
                return None
        if includes:
            for p in includes:
                if re.search(p, imagename):
                    return entry
            return None
        if excludes:
            for p in excludes:
                if re.search(p, imagename):
                    return None
        return entry

    builder = KollaImageBuilder([template_file], template_dir)
    result = builder.container_images_from_template(
        filter=ffunc, **mapping_args)
    manager = image_uploader.ImageUploadManager(
        mirrors=mirrors,
        registry_credentials=registry_credentials,
        multi_arch=multi_arch,
        lock=lock
    )
    uploader = manager.uploader('python')
    images = [i.get('imagename', '') for i in result]
    # set a flag to record whether the default tag is used or not. the
    # logic here is that if the tag key is not already in mapping then it
    # wil be added during the template render, so default_tag is set to
    # True.
    default_tag = 'tag' not in mapping_args
    if tag_from_label:
        # Replace each image's tag with the one discovered from its label.
        image_version_tags = uploader.discover_image_tags(
            images, tag_from_label, default_tag)
        for entry in result:
            imagename = entry.get('imagename', '')
            image_no_tag = imagename.rpartition(':')[0]
            if image_no_tag in image_version_tags:
                entry['imagename'] = '%s:%s' % (
                    image_no_tag, image_version_tags[image_no_tag])
    images_with_labels = []
    if modify_only_with_labels:
        images_with_labels = uploader.filter_images_with_labels(
            images, modify_only_with_labels)
    images_with_source = []
    if modify_only_with_source:
        images_with_source = [i.get('imagename') for i in result
                              if i.get('image_source', '')
                              in modify_only_with_source]
    params = {}
    modify_append_tag = append_tag
    for entry in result:
        imagename = entry.get('imagename', '')
        append_tag = ''
        # Only attach the modify role when no label/source restriction is
        # in effect, or this image matches one of the restrictions.
        if modify_role and (
                (not modify_only_with_labels
                 and not modify_only_with_source) or
                (imagename in images_with_labels or
                 imagename in images_with_source)):
            entry['modify_role'] = modify_role
            if modify_append_tag:
                entry['modify_append_tag'] = modify_append_tag
                append_tag = modify_append_tag
            if modify_vars:
                entry['modify_vars'] = modify_vars
        if pull_source:
            entry['pull_source'] = pull_source
        if push_destination:
            # substitute discovered registry if push_destination is set to true
            if isinstance(push_destination, bool):
                push_destination = image_uploader.get_undercloud_registry()
            entry['push_destination'] = push_destination
            # replace the host portion of the imagename with the
            # push_destination, since that is where they will be uploaded to
            image = imagename.partition('/')[2]
            imagename = '/'.join((push_destination, image))
        if 'params' in entry:
            for p in entry.pop('params'):
                params[p] = imagename + append_tag
        if 'services' in entry:
            del(entry['services'])
    params.update(
        detect_insecure_registries(params, lock=lock))
    return_data = {}
    if output_env_file:
        return_data[output_env_file] = params
    if output_images_file:
        return_data[output_images_file] = result
    return return_data
def detect_insecure_registries(params, lock=None):
    """Detect insecure registries in image parameters.

    :param params: dict of container image parameters
    :param lock: a locking object to use when probing registries
    :returns: dict containing DockerInsecureRegistryAddress parameter to be
              merged into other parameters (empty when none are insecure)
    """
    uploader = image_uploader.ImageUploadManager(lock=lock).uploader('python')
    # The registry is the host portion (before the first '/') of each image.
    hosts = {image.split('/')[0] for image in params.values()}
    insecure = sorted(h for h in hosts if uploader.is_insecure_registry(h))
    if not insecure:
        return {}
    return {'DockerInsecureRegistryAddress': insecure}
class KollaImageBuilder(base.BaseImageManager):
    """Build images using kolla-build"""

    @staticmethod
    def imagename_to_regex(imagename):
        """Reduce a full image name to the bare kolla image name.

        Strips any registry/namespace prefix, trailing tag, base-distro
        prefix (centos/rhel) and install-type prefix (binary/source/
        rdo/rhos), leaving a string usable as a kolla-build regex.

        :param imagename: full image name, e.g.
            'docker.io/tripleomaster/centos-binary-nova-api:latest'
        :returns: the bare image name (e.g. 'nova-api'), or None when
            imagename is empty
        """
        if not imagename:
            return
        # remove any namespace from the start
        imagename = imagename.split('/')[-1]
        # remove any tag from the end
        imagename = imagename.split(':')[0]
        # remove supported base names from the start
        imagename = re.sub(r'^(centos|rhel)-', '', imagename)
        # remove install_type from the start
        imagename = re.sub(r'^(binary|source|rdo|rhos)-', '', imagename)
        # what results should be acceptable as a regex to build one image
        return imagename

    @staticmethod
    def container_images_template_inputs(**kwargs):
        '''Build the template mapping from defaults and keyword arguments.

        Defaults in CONTAINER_IMAGES_DEFAULTS are combined with keyword
        argments to return a dict that can be used to render the container
        images template. Any set values for name_prefix and name_suffix are
        hyphenated appropriately.

        :returns: dict of template substitutions, or None when the
            packaged defaults were never loaded
        '''
        mapping = dict(kwargs)
        if CONTAINER_IMAGES_DEFAULTS is None:
            # init_prepare_defaults() was never run (defaults file absent).
            return
        for k, v in CONTAINER_IMAGES_DEFAULTS.items():
            mapping.setdefault(k, v)
        # Normalize prefix/suffix so they always join with a hyphen.
        np = mapping['name_prefix']
        if np and not np.endswith('-'):
            mapping['name_prefix'] = np + '-'
        ns = mapping['name_suffix']
        if ns and not ns.startswith('-'):
            mapping['name_suffix'] = '-' + ns
        return mapping

    def container_images_from_template(self, filter=None, **kwargs):
        '''Build container_images data from container_images_template.

        Any supplied keyword arguments are used for the substitution mapping
        to transform the data in the config file container_images_template
        section.

        The resulting data resembles a config file which contains a valid
        populated container_images section.

        If a function is passed to the filter argument, this will be used to
        modify the entry after substitution. If the filter function returns
        None then the entry will not be added to the resulting list.

        Defaults are applied so that when no arguments are provided.

        :raises ValueError: when more than one config file is configured
        '''
        mapping = self.container_images_template_inputs(**kwargs)
        result = []
        if len(self.config_files) != 1:
            raise ValueError('A single config file must be specified')
        config_file = self.config_files[0]
        template_dir = self.template_dir
        with open(config_file) as cf:
            if template_dir is not None:
                # A FileSystemLoader lets the template {% include %}
                # sibling files from template_dir.
                template = jinja2.Environment(loader=jinja2.FileSystemLoader(
                    template_dir)).from_string(cf.read())
            else:
                template = jinja2.Template(cf.read())
        rendered = template.render(mapping)
        rendered_dict = yaml.safe_load(rendered)
        for i in rendered_dict[self.CONTAINER_IMAGES_TEMPLATE]:
            entry = dict(i)
            if filter:
                entry = filter(entry)
            if entry is not None:
                result.append(entry)
        return result

    def build_images(self, kolla_config_files=None, excludes=None,
                     template_only=False, kolla_tmp_dir=None):
        """Run kolla-build over the known kolla images.

        :param kolla_config_files: list of kolla config files passed via
            --config-file
        :param excludes: image names to skip (default: none)
        :param template_only: when True only render the dockerfiles
            (--template-only) and also collect the dependency list
        :param kolla_tmp_dir: work dir passed to kolla-build when
            template_only is set
        :returns: stdout of the last kolla-build invocation
        :raises subprocess.CalledProcessError: when kolla-build fails
        """
        # NOTE: previous signature used a mutable default ([]) for
        # excludes; None preserves the behaviour without the shared-state
        # pitfall.
        excludes = excludes or []
        cmd = ['kolla-build']
        if kolla_config_files:
            for f in kolla_config_files:
                cmd.append('--config-file')
                cmd.append(f)
        if len(self.config_files) == 0:
            # No explicit config: fall back to the packaged template.
            self.config_files = [DEFAULT_TEMPLATE_FILE]
            self.template_dir = DEFAULT_TEMPLATE_DIR
            container_images = self.container_images_from_template()
        else:
            container_images = self.load_config_files(self.CONTAINER_IMAGES) \
                or []
        container_images.sort(key=lambda i: i.get('imagename'))
        for i in container_images:
            # Do not attempt to build containers that are not from kolla or
            # are in our exclude list
            if not i.get('image_source', '') == 'kolla':
                continue
            image = self.imagename_to_regex(i.get('imagename'))
            # Make sure the image was properly parsed and not purposely
            # skipped
            if image and image not in excludes:
                # NOTE(mgoddard): Use a full string match.
                cmd.append("^%s$" % image)
        if template_only:
            # build the dep list cmd line
            cmd_deps = list(cmd)
            cmd_deps.append('--list-dependencies')
            # build the template only cmd line
            cmd.append('--template-only')
            cmd.append('--work-dir')
            cmd.append(kolla_tmp_dir)
        LOG.info(_('Running %s'), ' '.join(cmd))
        env = os.environ.copy()
        process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                                   universal_newlines=True)
        # stderr is not piped here, so err is always None for this call.
        out, err = process.communicate()
        if process.returncode != 0:
            LOG.error(_('Building containers image process failed with %d rc'),
                      process.returncode)
            raise subprocess.CalledProcessError(process.returncode, cmd, err)
        if template_only:
            # NOTE(review): uses self.logger (from the base class) while the
            # rest of this method uses module-level LOG -- confirm intended.
            self.logger.info('Running %s', ' '.join(cmd_deps))
            env = os.environ.copy()
            process = subprocess.Popen(cmd_deps, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       universal_newlines=True)
            out, err = process.communicate()
            if process.returncode != 0:
                raise subprocess.CalledProcessError(process.returncode,
                                                    cmd_deps, err)
        return out
|
|
"""The sensor tests for the august platform."""
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, PERCENTAGE, STATE_UNKNOWN
from homeassistant.helpers import entity_registry as er
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_activities_from_fixture,
_mock_doorbell_from_fixture,
_mock_doorsense_enabled_august_lock_detail,
_mock_lock_from_fixture,
)
async def test_create_doorbell(hass):
    """Test creation of a doorbell."""
    doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
    await _create_august_with_devices(hass, [doorbell_one])
    sensor_k98gidt45gul_name_battery = hass.states.get(
        "sensor.k98gidt45gul_name_battery"
    )
    assert sensor_k98gidt45gul_name_battery.state == "96"
    # Use the shared constant instead of the raw "unit_of_measurement"
    # string, consistent with the keypad battery tests in this module.
    assert (
        sensor_k98gidt45gul_name_battery.attributes[ATTR_UNIT_OF_MEASUREMENT]
        == PERCENTAGE
    )
async def test_create_doorbell_offline(hass):
    """Test creation of a doorbell that is offline."""
    doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.offline.json")
    await _create_august_with_devices(hass, [doorbell_one])
    entity_registry = er.async_get(hass)
    sensor_tmt100_name_battery = hass.states.get("sensor.tmt100_name_battery")
    assert sensor_tmt100_name_battery.state == "81"
    # Use the shared constant instead of the raw "unit_of_measurement" key.
    assert (
        sensor_tmt100_name_battery.attributes[ATTR_UNIT_OF_MEASUREMENT]
        == PERCENTAGE
    )
    entry = entity_registry.async_get("sensor.tmt100_name_battery")
    assert entry
    assert entry.unique_id == "tmt100_device_battery"
async def test_create_doorbell_hardwired(hass):
    """Test creation of a doorbell that is hardwired without a battery."""
    hardwired_doorbell = await _mock_doorbell_from_fixture(
        hass, "get_doorbell.nobattery.json"
    )
    await _create_august_with_devices(hass, [hardwired_doorbell])
    # A hardwired doorbell has no battery, so no battery sensor is created.
    assert hass.states.get("sensor.tmt100_name_battery") is None
async def test_create_lock_with_linked_keypad(hass):
    """Test creation of a lock with a linked keypad that both have a battery."""
    lock_one = await _mock_lock_from_fixture(hass, "get_lock.doorsense_init.json")
    await _create_august_with_devices(hass, [lock_one])
    entity_registry = er.async_get(hass)

    # Lock battery sensor.
    lock_battery = hass.states.get(
        "sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
    )
    assert lock_battery.state == "88"
    # Use the shared constant instead of the raw "unit_of_measurement" key.
    assert lock_battery.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
    entry = entity_registry.async_get(
        "sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
    )
    assert entry
    assert entry.unique_id == "A6697750D607098BAE8D6BAA11EF8063_device_battery"

    # Linked keypad battery sensor.
    state = hass.states.get("sensor.front_door_lock_keypad_battery")
    assert state.state == "60"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
    entry = entity_registry.async_get("sensor.front_door_lock_keypad_battery")
    assert entry
    assert entry.unique_id == "5bc65c24e6ef2a263e1450a8_linked_keypad_battery"
async def test_create_lock_with_low_battery_linked_keypad(hass):
    """Test creation of a lock with a linked keypad whose battery is low."""
    lock_one = await _mock_lock_from_fixture(hass, "get_lock.low_keypad_battery.json")
    await _create_august_with_devices(hass, [lock_one])
    entity_registry = er.async_get(hass)

    # Lock battery sensor.
    lock_battery = hass.states.get(
        "sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
    )
    assert lock_battery.state == "88"
    # Use the shared constant instead of the raw "unit_of_measurement" key.
    assert lock_battery.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
    entry = entity_registry.async_get(
        "sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
    )
    assert entry
    assert entry.unique_id == "A6697750D607098BAE8D6BAA11EF8063_device_battery"

    # Linked keypad battery sensor reports the low value from the fixture.
    state = hass.states.get("sensor.front_door_lock_keypad_battery")
    assert state.state == "10"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
    entry = entity_registry.async_get("sensor.front_door_lock_keypad_battery")
    assert entry
    assert entry.unique_id == "5bc65c24e6ef2a263e1450a8_linked_keypad_battery"

    # No activity means it will be unavailable until someone unlocks/locks it
    lock_operator_sensor = entity_registry.async_get(
        "sensor.a6697750d607098bae8d6baa11ef8063_name_operator"
    )
    assert (
        lock_operator_sensor.unique_id
        == "A6697750D607098BAE8D6BAA11EF8063_lock_operator"
    )
    assert (
        hass.states.get("sensor.a6697750d607098bae8d6baa11ef8063_name_operator").state
        == STATE_UNKNOWN
    )
async def test_lock_operator_bluetooth(hass):
    """Test operation of a lock with doorsense and bridge."""
    lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
    activities = await _mock_activities_from_fixture(
        hass, "get_activity.lock_from_bluetooth.json"
    )
    await _create_august_with_devices(hass, [lock_one], activities=activities)
    entity_registry = er.async_get(hass)
    assert entity_registry.async_get("sensor.online_with_doorsense_name_operator")
    # Fetch the operator state once and verify the lock operation was
    # attributed to a local (bluetooth/mobile) action.
    operator_state = hass.states.get("sensor.online_with_doorsense_name_operator")
    assert operator_state.state == "Your favorite elven princess"
    assert operator_state.attributes["remote"] is False
    assert operator_state.attributes["keypad"] is False
    assert operator_state.attributes["autorelock"] is False
    assert operator_state.attributes["method"] == "mobile"
async def test_lock_operator_keypad(hass):
    """Test operation of a lock with doorsense and bridge."""
    lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
    activities = await _mock_activities_from_fixture(
        hass, "get_activity.lock_from_keypad.json"
    )
    await _create_august_with_devices(hass, [lock_one], activities=activities)
    entity_registry = er.async_get(hass)
    assert entity_registry.async_get("sensor.online_with_doorsense_name_operator")
    # Fetch the operator state once and verify the lock operation was
    # attributed to the keypad.
    operator_state = hass.states.get("sensor.online_with_doorsense_name_operator")
    assert operator_state.state == "Your favorite elven princess"
    assert operator_state.attributes["remote"] is False
    assert operator_state.attributes["keypad"] is True
    assert operator_state.attributes["autorelock"] is False
    assert operator_state.attributes["method"] == "keypad"
async def test_lock_operator_remote(hass):
    """Test operation of a lock with doorsense and bridge."""
    lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
    activities = await _mock_activities_from_fixture(hass, "get_activity.lock.json")
    await _create_august_with_devices(hass, [lock_one], activities=activities)
    entity_registry = er.async_get(hass)
    assert entity_registry.async_get("sensor.online_with_doorsense_name_operator")
    # Fetch the operator state once and verify the lock operation was
    # attributed to a remote action.
    operator_state = hass.states.get("sensor.online_with_doorsense_name_operator")
    assert operator_state.state == "Your favorite elven princess"
    assert operator_state.attributes["remote"] is True
    assert operator_state.attributes["keypad"] is False
    assert operator_state.attributes["autorelock"] is False
    assert operator_state.attributes["method"] == "remote"
async def test_lock_operator_autorelock(hass):
    """Test operation of a lock with doorsense and bridge."""
    lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
    activities = await _mock_activities_from_fixture(
        hass, "get_activity.lock_from_autorelock.json"
    )
    await _create_august_with_devices(hass, [lock_one], activities=activities)
    entity_registry = er.async_get(hass)
    assert entity_registry.async_get("sensor.online_with_doorsense_name_operator")
    # Fetch the operator state once and verify the lock operation was
    # attributed to the auto-relock feature.
    operator_state = hass.states.get("sensor.online_with_doorsense_name_operator")
    assert operator_state.state == "Auto Relock"
    assert operator_state.attributes["remote"] is False
    assert operator_state.attributes["keypad"] is False
    assert operator_state.attributes["autorelock"] is True
    assert operator_state.attributes["method"] == "autorelock"
|
|
#!/usr/bin/env python
"""
Prints a map of the entire world.
"""
import locale, os, sys
import re, math
from struct import pack, unpack
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile
from nbt.chunk import Chunk
from nbt.world import WorldFolder,McRegionWorldFolder
# PIL module (not build-in)
try:
from PIL import Image
except ImportError:
# PIL not in search path. Let's see if it can be found in the parent folder
sys.stderr.write("Module PIL/Image not found. Pillow (a PIL fork) can be found at http://python-imaging.github.io/\n")
# Note: it may also be possible that PIL is installed, but JPEG support is disabled or broken
sys.exit(70) # EX_SOFTWARE
def get_heightmap_image(chunk, buffer=False, gmin=False, gmax=False):
    """Render a chunk's heightmap as a 16x16 greyscale PIL image.

    :param chunk: chunk whose ``blocks.generate_heightmap`` yields one
        height value per column
    :param buffer: passed through to generate_heightmap
    :param gmin: explicit minimum height, for when this chunk is part of a
        bigger map (False means "use this chunk's own minimum")
    :param gmax: explicit maximum height (False means this chunk's maximum)
    :returns: a 16x16 mode-'L' PIL image
    """
    points = chunk.blocks.generate_heightmap(buffer, True)
    # Normalize the points. Use "is False" so a legitimate explicit bound
    # of 0 is not mistaken for "unset" (0 == False in Python).
    hmin = min(points) if gmin is False else gmin
    hmax = max(points) if gmax is False else gmax
    hdelta = hmax - hmin + 0.0
    # BUGFIX: accumulate bytes, not str -- pack() returns bytes, and the
    # old "" + bytes concatenation raises TypeError on Python 3.
    pixels = b""
    for y in range(16):
        for x in range(16):
            # pix X => mc -Z
            # pix Y => mc X
            offset = (15 - x) * 16 + y
            # Scale into 0..255 and clamp.
            height = int((points[offset] - hmin) / hdelta * 255)
            if height < 0:
                height = 0
            if height > 255:
                height = 255
            pixels += pack(">B", height)
    # Image.fromstring() was removed from Pillow; frombytes() is the
    # supported replacement.
    im = Image.frombytes('L', (16, 16), pixels)
    return im
def get_map(chunk):
    # Show an image of the chunk from above
    # Returns a 16x16 RGB PIL Image. Each pixel is the color of the first
    # opaque block found scanning down from the sky, shaded by altitude and
    # tinted by any translucent blocks (water/leaves/ice/fire) above it.
    pixels = b""
    # Base colors (HSL) per block id; ids missing from this table render white.
    block_colors = {
        0: {'h':0, 's':0, 'l':0}, # Air
        1: {'h':0, 's':0, 'l':32}, # Stone
        2: {'h':94, 's':42, 'l':32}, # Grass
        3: {'h':27, 's':51, 'l':15}, # Dirt
        4: {'h':0, 's':0, 'l':25}, # Cobblestone
        8: {'h':228, 's':50, 'l':23}, # Water
        9: {'h':228, 's':50, 'l':23}, # Water
        10: {'h':16, 's':100, 'l':48}, # Lava
        11: {'h':16, 's':100, 'l':48}, # Lava
        12: {'h':53, 's':22, 'l':58}, # Sand
        13: {'h':21, 's':18, 'l':20}, # Gravel
        17: {'h':35, 's':93, 'l':15}, # Wood
        18: {'h':114, 's':64, 'l':22}, # Leaves
        24: {'h':48, 's':31, 'l':40}, # Sandstone
        37: {'h':60, 's':100, 'l':60}, # Yellow Flower
        38: {'h':0, 's':100, 'l':50}, # Red Flower
        50: {'h':60, 's':100, 'l':50}, # Torch
        51: {'h':55, 's':100, 'l':50}, # Fire
        59: {'h':123, 's':60, 'l':50}, # Crops
        60: {'h':35, 's':93, 'l':15}, # Farmland
        78: {'h':240, 's':10, 'l':85}, # Snow
        79: {'h':240, 's':10, 'l':95}, # Ice
        81: {'h':126, 's':61, 'l':20}, # Cacti
        82: {'h':7, 's':62, 'l':23}, # Clay
        83: {'h':123, 's':70, 'l':50}, # Sugarcane
        86: {'h':24, 's':100, 'l':45}, # Pumpkin
        91: {'h':24, 's':100, 'l':45}, # Jack-o-lantern
    }
    for z in range(16):
        for x in range(16):
            # Find the highest block in this column
            ground_height = 127
            tints = []
            # Scan downward; translucent blocks accumulate as tints, the
            # first opaque block (or bedrock level y=0) ends the scan.
            for y in range(127,-1,-1):
                block_id = chunk.blocks.get_block(x,y,z)
                block_data = chunk.blocks.get_data(x,y,z)
                if (block_id == 8 or block_id == 9):
                    tints.append({'h':228, 's':50, 'l':23}) # Water
                elif (block_id == 18):
                    if (block_data == 1):
                        tints.append({'h':114, 's':64, 'l':22}) # Redwood Leaves
                    elif (block_data == 2):
                        tints.append({'h':93, 's':39, 'l':10}) # Birch Leaves
                    else:
                        tints.append({'h':114, 's':64, 'l':22}) # Normal Leaves
                elif (block_id == 79):
                    tints.append({'h':240, 's':5, 'l':95}) # Ice
                elif (block_id == 51):
                    tints.append({'h':55, 's':100, 'l':50}) # Fire
                elif (block_id != 0 or y == 0):
                    # Here is ground level
                    ground_height = y
                    break
            # block_id still holds the ground block id found by the scan above.
            color = block_colors[block_id] if (block_id in block_colors) else {'h':0, 's':0, 'l':100}
            # Lighten/darken by altitude relative to sea level (y=64).
            height_shift = (ground_height-64)*0.25
            final_color = {'h':color['h'], 's':color['s'], 'l':color['l']+height_shift}
            if final_color['l'] > 100: final_color['l'] = 100
            if final_color['l'] < 0: final_color['l'] = 0
            # Apply tints from translucent blocks
            for tint in reversed(tints):
                final_color = hsl_slide(final_color, tint, 0.4)
            rgb = hsl2rgb(final_color['h'], final_color['s'], final_color['l'])
            pixels += pack("BBB", rgb[0], rgb[1], rgb[2])
    im = Image.frombytes('RGB', (16,16), pixels)
    return im
## Color functions for map generation ##
# Hue given in degrees,
# saturation and lightness given either in range 0-1 or 0-100 and returned in kind
def hsl_slide(hsl1, hsl2, ratio):
    """Blend two HSL colors, returning a point *ratio* of the way from hsl1
    toward hsl2.

    Hue/saturation are interpolated as Cartesian points on the H/S color
    circle (so blending across the hue wheel behaves sensibly); lightness is
    interpolated linearly. Inputs are dicts with 'h' (degrees), 's', 'l'.
    """
    h1, h2 = hsl1['h'], hsl2['h']
    # Take the short way around the hue circle.
    if (abs(h2 - h1) > 180):
        if (h1 > h2):
            h1 -= 360
        else:
            h1 += 360
    # Find location of two colors on the H/S color circle
    p1x = math.cos(math.radians(h1))*hsl1['s']
    p1y = math.sin(math.radians(h1))*hsl1['s']
    p2x = math.cos(math.radians(h2))*hsl2['s']
    p2y = math.sin(math.radians(h2))*hsl2['s']
    # Slide part of the way from tint to base color
    avg_x = p1x + ratio*(p2x-p1x)
    avg_y = p1y + ratio*(p2y-p1y)
    # atan2/hypot replace atan(avg_y/avg_x) and avg_y/sin(avg_h): the old
    # form picked the wrong quadrant for negative avg_x and raised
    # ZeroDivisionError whenever the averaged hue was exactly 0 or 180.
    avg_h = math.degrees(math.atan2(avg_y, avg_x))
    avg_s = math.hypot(avg_x, avg_y)
    avg_l = hsl1['l'] + ratio*(hsl2['l']-hsl1['l'])
    return {'h':avg_h, 's':avg_s, 'l':avg_l}
# From http://www.easyrgb.com/index.php?X=MATH&H=19#text19
def hsl2rgb(H, S, L):
    """Convert an HSL color (hue in degrees, S and L in 0-100) to an
    (R, G, B) tuple of ints in 0-255.

    Algorithm from http://www.easyrgb.com/index.php?X=MATH&H=19#text19
    """
    hue = H/360.0
    sat = S/100.0  # Turn into a fraction
    lum = L/100.0
    if sat == 0:
        # Achromatic: all channels equal lightness.
        grey = int(lum*255)
        return (grey, grey, grey)
    upper = lum*(1+sat) if lum < 0.5 else (lum+sat) - (sat*lum)
    lower = 2*lum - upper
    def channel(vH):
        # Map one hue offset to a 0-1 channel intensity.
        if vH < 0: vH += 1
        if vH > 1: vH -= 1
        if (6*vH) < 1: return lower + (upper-lower)*6*vH
        if (2*vH) < 1: return upper
        if (3*vH) < 2: return lower + (upper-lower)*(2/3.0-vH)*6
        return lower
    return (int(255*channel(hue + (1.0/3))),
            int(255*channel(hue)),
            int(255*channel(hue - (1.0/3))))
def main(world_folder, show=True):
    """Render every chunk of *world_folder* into one PNG (16px per chunk).

    Writes <basename>.png next to the working directory (or .partial.png on
    Ctrl-C) and returns a sysexits-style status code.
    """
    world = McRegionWorldFolder(world_folder)  # map still only supports McRegion maps
    bb = world.get_boundingbox()
    map = Image.new('RGB', (16*bb.lenx(),16*bb.lenz()))
    t = world.chunk_count()
    try:
        i =0.0
        for chunk in world.iter_chunks():
            # Lightweight textual progress indicator on stdout.
            if i % 50 ==0:
                sys.stdout.write("Rendering image")
            elif i % 2 == 0:
                sys.stdout.write(".")
                sys.stdout.flush()
            elif i % 50 == 49:
                sys.stdout.write("%5.1f%%\n" % (100*i/t))
            i +=1
            chunkmap = get_map(chunk)
            x,z = chunk.get_coords()
            # Paste the chunk tile relative to the world bounding-box origin.
            map.paste(chunkmap, (16*(x-bb.minx),16*(z-bb.minz)))
        print(" done\n")
        filename = os.path.basename(world_folder)+".png"
        map.save(filename,"PNG")
        print("Saved map as %s" % filename)
    except KeyboardInterrupt:
        # Save whatever was rendered so far before bailing out.
        print(" aborted\n")
        filename = os.path.basename(world_folder)+".partial.png"
        map.save(filename,"PNG")
        print("Saved map as %s" % filename)
        return 75 # EX_TEMPFAIL
    if show:
        map.show()
    return 0 # NOERR
if __name__ == '__main__':
    # Usage: <script> [--noshow] <world_folder>
    if (len(sys.argv) == 1):
        print("No world folder specified!")
        sys.exit(64) # EX_USAGE
    if sys.argv[1] == '--noshow' and len(sys.argv) > 2:
        # --noshow: save the PNG but skip opening an image viewer.
        show = False
        world_folder = sys.argv[2]
    else:
        show = True
        world_folder = sys.argv[1]
    # clean path name, eliminate trailing slashes. required for os.path.basename()
    world_folder = os.path.normpath(world_folder)
    if (not os.path.exists(world_folder)):
        print("No such folder as "+world_folder)
        sys.exit(72) # EX_IOERR
    sys.exit(main(world_folder, show))
|
|
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import keymgr as key_manager
# Config options for the backup service: metadata container versioning and
# progress-notification cadence.
service_opts = [
    cfg.IntOpt('backup_metadata_version', default=2,
               help='Backup metadata version to be used when backing up '
                    'volume metadata. If this number is bumped, make sure the '
                    'service doing the restore supports the new version.'),
    cfg.IntOpt('backup_object_number_per_notification',
               default=10,
               help='The number of chunks or objects, for which one '
                    'Ceilometer notification will be sent'),
    cfg.IntOpt('backup_timer_interval',
               default=120,
               help='Interval, in seconds, between two progress notifications '
                    'reporting the backup status'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)

# Module-level logger.
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
    """Collect and restore json-serializable volume metadata.

    On backup, :meth:`get` gathers base volume fields, user metadata and
    Glance metadata into a single versioned json document.  On restore,
    :meth:`put` selectively writes fields back according to the version
    recorded in that document.
    """

    TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
    TYPE_TAG_VOL_META = 'volume-metadata'
    TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'

    def __init__(self, context, db=None):
        super(BackupMetadataAPI, self).__init__(db)
        self.context = context

    @staticmethod
    def _is_serializable(value):
        """Returns True if value is serializable."""
        try:
            jsonutils.dumps(value)
        except TypeError:
            LOG.info("Value with type=%s is not serializable",
                     type(value))
            return False
        return True

    def _save_vol_base_meta(self, container, volume_id):
        """Save base volume metadata to container.

        This will fetch all fields from the db Volume object for volume_id and
        save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_BASE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for key, value in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(value):
                    LOG.info("Unable to serialize field '%s' - excluding "
                             "from backup", key)
                    continue
                # Copy the encryption key UUID for backup.
                # NOTE: must use equality, not identity - ``key is '...'``
                # only worked by CPython string-interning accident.
                if key == 'encryption_key_id' and value is not None:
                    km = key_manager.API(CONF)
                    value = km.store(self.context, km.get(self.context, value))
                    LOG.debug("Copying encryption key UUID for backup.")
                container[type_tag][key] = value
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_meta(self, container, volume_id):
        """Save volume metadata to container.

        This will fetch all fields from the db VolumeMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_metadata_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for entry in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(meta[entry]):
                    LOG.info("Unable to serialize field '%s' - excluding "
                             "from backup", entry)
                    continue
                container[type_tag][entry] = meta[entry]
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_glance_meta(self, container, volume_id):
        """Save volume Glance metadata to container.

        This will fetch all fields from the db VolumeGlanceMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_GLANCE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        try:
            meta = self.db.volume_glance_metadata_get(self.context, volume_id)
            if meta:
                container[type_tag] = {}
                for entry in meta:
                    # Exclude fields that are "not JSON serializable"
                    if not self._is_serializable(entry.value):
                        LOG.info("Unable to serialize field '%s' - "
                                 "excluding from backup", entry)
                        continue
                    container[type_tag][entry.key] = entry.value
                LOG.debug("Completed fetching metadata type '%s'", type_tag)
        except exception.GlanceMetadataNotFound:
            # A volume without Glance metadata is perfectly normal.
            LOG.debug("No metadata type '%s' available", type_tag)

    @staticmethod
    def _filter(metadata, fields, excludes=None):
        """Returns set of metadata restricted to required fields.

        If fields is empty list, the full set is returned.

        :param metadata: master set of metadata
        :param fields: list of fields we want to extract
        :param excludes: fields to be excluded
        :returns: filtered metadata
        """
        if not fields:
            return metadata

        if not excludes:
            excludes = []

        subset = {}
        for field in fields:
            if field in metadata and field not in excludes:
                subset[field] = metadata[field]
            else:
                LOG.debug("Excluding field '%s'", field)

        return subset

    def _restore_vol_base_meta(self, metadata, volume_id, fields):
        """Restore values to Volume object for provided fields."""
        LOG.debug("Restoring volume base metadata")
        excludes = []

        # Ignore unencrypted backups.
        key = 'encryption_key_id'
        if key in fields and key in metadata and metadata[key] is not None:
            self._restore_vol_encryption_meta(volume_id,
                                              metadata['volume_type_id'])

        # NOTE(dosaboy): if the target volume looks like it was auto-created
        # as part of this restore operation and we have a name to restore
        # then apply the name to the target volume. However, if that target
        # volume already existed and it has a name or we do not have a name to
        # restore, then ignore this key. This is intended to be a less drastic
        # solution than commit 7ee80f7.
        key = 'display_name'
        if key in fields and key in metadata:
            target_vol = self.db.volume_get(self.context, volume_id)
            name = target_vol.get(key, '')
            if (not metadata.get(key) or name and
                    not name.startswith('restore_backup_')):
                excludes.append(key)
                excludes.append('display_description')

        metadata = self._filter(metadata, fields, excludes=excludes)
        self.db.volume_update(self.context, volume_id, metadata)

    def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
        """Restores the volume_type_id for encryption if needed.

        Only allow restoration of an encrypted backup if the destination
        volume has the same volume type as the source volume. Otherwise
        encryption will not work. If volume types are already the same,
        no action is needed.
        """
        dest_vol = self.db.volume_get(self.context, volume_id)
        if dest_vol['volume_type_id'] != src_volume_type_id:
            LOG.debug("Volume type id's do not match.")
            # If the volume types do not match, and the destination volume
            # does not have a volume type, force the destination volume
            # to have the encrypted volume type, provided it still exists.
            if dest_vol['volume_type_id'] is None:
                try:
                    self.db.volume_type_get(
                        self.context, src_volume_type_id)
                except exception.VolumeTypeNotFound:
                    LOG.debug("Volume type of source volume has been "
                              "deleted. Encrypted backup restore has "
                              "failed.")
                    msg = _("The source volume type '%s' is not "
                            "available.") % (src_volume_type_id)
                    raise exception.EncryptedBackupOperationFailed(msg)
                # Update dest volume with src volume's volume_type_id.
                LOG.debug("The volume type of the destination volume "
                          "will become the volume type of the source "
                          "volume.")
                self.db.volume_update(self.context, volume_id,
                                      {'volume_type_id': src_volume_type_id})
            else:
                # Volume type id's do not match, and destination volume
                # has a volume type. Throw exception.
                LOG.warning("Destination volume type is different from "
                            "source volume type for an encrypted volume. "
                            "Encrypted backup restore has failed.")
                msg = (_("The source volume type '%(src)s' is different "
                         "than the destination volume type '%(dest)s'.") %
                       {'src': src_volume_type_id,
                        'dest': dest_vol['volume_type_id']})
                raise exception.EncryptedBackupOperationFailed(msg)

    def _restore_vol_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeMetadata object for provided fields."""
        LOG.debug("Restoring volume metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_metadata_update(self.context, volume_id, metadata, True)

    def _restore_vol_glance_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeGlanceMetadata object for provided fields.

        First delete any existing metadata then save new values.
        """
        LOG.debug("Restoring volume glance metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_glance_metadata_delete_by_volume(self.context,
                                                        volume_id)
        for key, value in metadata.items():
            self.db.volume_glance_metadata_create(self.context,
                                                  volume_id,
                                                  key, value)
        # Now mark the volume as bootable
        self.db.volume_update(self.context, volume_id,
                              {'bootable': True})

    def _v1_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields list>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_BASE_META:
                (self._restore_vol_base_meta,
                 ['display_name', 'display_description']),
                self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def _v2_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields list>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_BASE_META:
                (self._restore_vol_base_meta,
                 ['display_name', 'display_description', 'encryption_key_id']),
                self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def get(self, volume_id):
        """Get volume metadata.

        Returns a json-encoded dict containing all metadata and the restore
        version i.e. the version used to decide what actually gets restored
        from this container when doing a backup restore.
        """
        container = {'version': CONF.backup_metadata_version}
        self._save_vol_base_meta(container, volume_id)
        self._save_vol_meta(container, volume_id)
        self._save_vol_glance_meta(container, volume_id)

        if container:
            return jsonutils.dumps(container)
        else:
            return None

    def put(self, volume_id, json_metadata):
        """Restore volume metadata to a volume.

        The json container should contain a version that is supported here.
        """
        meta_container = jsonutils.loads(json_metadata)
        version = meta_container['version']
        if version == 1:
            factory = self._v1_restore_factory()
        elif version == 2:
            factory = self._v2_restore_factory()
        else:
            msg = (_("Unsupported backup metadata version (%s)") % (version))
            raise exception.BackupMetadataUnsupportedVersion(msg)

        # Renamed from 'type' to avoid shadowing the builtin.
        for meta_type, (func, fields) in factory.items():
            if meta_type in meta_container:
                func(meta_container[meta_type], volume_id, fields)
            else:
                LOG.debug("No metadata of type '%s' to restore", meta_type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
    """Abstract base class for cinder backup drivers.

    Concrete drivers must implement backup(), restore() and delete_backup();
    the record import/export hooks are optional extension points.
    """

    def __init__(self, context, db=None):
        super(BackupDriver, self).__init__(db)
        self.context = context
        self.backup_meta_api = BackupMetadataAPI(context, db)
        # This flag indicates if backup driver supports force
        # deletion. So it should be set to True if the driver that inherits
        # from BackupDriver supports the force deletion function.
        self.support_force_delete = False

    def get_metadata(self, volume_id):
        # json-encoded volume metadata container (see BackupMetadataAPI.get).
        return self.backup_meta_api.get(volume_id)

    def put_metadata(self, volume_id, json_metadata):
        # Restore a previously exported metadata container onto volume_id.
        self.backup_meta_api.put(volume_id, json_metadata)

    @abc.abstractmethod
    def backup(self, backup, volume_file, backup_metadata=False):
        """Start a backup of a specified volume."""
        return

    @abc.abstractmethod
    def restore(self, backup, volume_id, volume_file):
        """Restore a saved backup."""
        return

    @abc.abstractmethod
    def delete_backup(self, backup):
        """Delete a saved backup."""
        return

    def export_record(self, backup):
        """Export driver specific backup record information.

        If backup backend needs additional driver specific information to
        import backup record back into the system it must overwrite this method
        and return it here as a dictionary so it can be serialized into a
        string.

        Default backup driver implementation has no extra information.

        :param backup: backup object to export
        :returns: driver_info - dictionary with extra information
        """
        return {}

    def import_record(self, backup, driver_info):
        """Import driver specific backup record information.

        If backup backend needs additional driver specific information to
        import backup record back into the system it must overwrite this method
        since it will be called with the extra information that was provided by
        export_record when exporting the backup.

        Default backup driver implementation does nothing since it didn't
        export any specific data in export_record.

        :param backup: backup object to export
        :param driver_info: dictionary with driver specific backup record
                            information
        :returns: nothing
        """
        return

    def check_for_setup_error(self):
        """Method for checking if backup backend is successfully installed."""
        return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
    """Backup driver that additionally supports verifying stored backups."""

    @abc.abstractmethod
    def verify(self, backup):
        """Verify that the backup exists on the backend.

        Verify that the backup is OK, possibly following an import record
        operation.

        :param backup: backup id of the backup to verify
        :raises InvalidBackup, NotImplementedError:
        """
        return
|
|
# Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as glanceclient
from heatclient import client as heatclient
from mistralclient.api import client as mistralclient
from neutronclient.neutron import client as neutronclient
from oslo_config import cfg
from swiftclient import client as swiftclient
from zaqarclient.queues.v1 import client as zaqarclient
from solum.common import exception
from solum.common import solum_barbicanclient
from solum.common import solum_keystoneclient
from solum.i18n import _
# Per-service oslo.config option groups. Each <service>_client group controls
# how the corresponding OpenStack client is constructed (endpoint type,
# region, TLS settings). GLOBAL_CLIENT_OPTS holds the fallback region.
GLOBAL_CLIENT_OPTS = [
    cfg.StrOpt('region_name',
               default='RegionOne',
               help=_(
                   'Region of endpoint in Identity service catalog to use'
                   ' for all clients.')),
]

barbican_client_opts = [
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for barbican "
                       "will not be verified.")), ]

# Note: this config is duplicated in many projects that use OpenStack
# clients. This should really be in the client.
# There is a place holder bug here:
# https://bugs.launchpad.net/solum/+bug/1292334
# that we use to track this.
glance_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Glance service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.'))]

heat_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the OpenStack service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]

zaqar_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Queue service catalog to use '
                   'for communication with the Zaqar service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for zaqar "
                       "will not be verified."))]

neutron_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Neutron service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('ca_cert',
               help=_('Optional CA bundle file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for neutron "
                       "will not be verified."))]

swift_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Swift service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified."))]

mistral_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the mistral service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections '
                      'with Mistral.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified "
                       "while using Mistral."))]
def list_opts():
    """Yield (group, options) pairs for oslo config-generator discovery."""
    option_groups = (
        (None, GLOBAL_CLIENT_OPTS),
        ('barbican_client', barbican_client_opts),
        ('glance_client', glance_client_opts),
        ('heat_client', heat_client_opts),
        ('zaqar_client', zaqar_client_opts),
        ('neutron_client', neutron_client_opts),
        ('swift_client', swift_client_opts),
        ('mistral_client', mistral_client_opts),
    )
    for group_name, opts in option_groups:
        yield group_name, opts
# Register every client option group on the global config object at import
# time so consumers can read cfg.CONF.<group>.<option> directly.
cfg.CONF.register_opts(GLOBAL_CLIENT_OPTS)
cfg.CONF.register_opts(barbican_client_opts, group='barbican_client')
cfg.CONF.register_opts(glance_client_opts, group='glance_client')
cfg.CONF.register_opts(heat_client_opts, group='heat_client')
cfg.CONF.register_opts(zaqar_client_opts, group='zaqar_client')
cfg.CONF.register_opts(neutron_client_opts, group='neutron_client')
cfg.CONF.register_opts(swift_client_opts, group='swift_client')
cfg.CONF.register_opts(mistral_client_opts, group='mistral_client')
def get_client_option(client, option):
    """Look up *option* from the [<client>_client] config group.

    For 'region_name' an empty per-client value falls back to the global
    region_name option.
    """
    group = getattr(cfg.CONF, '%s_client' % client)
    configured = getattr(group, option)
    if option != 'region_name':
        return configured
    return configured or cfg.CONF.get(option)
class OpenStackClients(object):
    """Convenience class to create and cache client instances."""

    def __init__(self, context):
        self.context = context
        # Lazily-created, cached per-service clients (see accessors below).
        self._barbican = None
        self._keystone = None
        self._glance = None
        self._heat = None
        self._neutron = None
        self._zaqar = None
        self._mistral = None

    def url_for(self, **kwargs):
        """Resolve a service endpoint URL from the keystone service catalog."""
        return self.keystone().client.service_catalog.url_for(**kwargs)

    @property
    def auth_url(self):
        # Keystone auth endpoint used when constructing the other clients.
        return self.keystone().endpoint

    @property
    def auth_token(self):
        # Prefer the token carried by the request context; fall back to the
        # token obtained by our own keystone client.
        return self.context.auth_token or self.keystone().auth_token

    @exception.wrap_keystone_exception
    def barbican(self):
        """Return a cached Barbican (key management) client."""
        if self._barbican:
            return self._barbican

        insecure = get_client_option('barbican', 'insecure')
        self._barbican = solum_barbicanclient.BarbicanClient(
            verify=not insecure)

        return self._barbican

    def keystone(self):
        """Return a cached Keystone client for the current context."""
        if self._keystone:
            return self._keystone

        self._keystone = solum_keystoneclient.KeystoneClient(self.context)
        return self._keystone

    @exception.wrap_keystone_exception
    def zaqar(self):
        """Return a cached Zaqar (queuing) client."""
        if self._zaqar:
            return self._zaqar

        endpoint_type = get_client_option('zaqar', 'endpoint_type')
        region_name = get_client_option('zaqar', 'region_name')
        endpoint_url = self.url_for(service_type='queuing',
                                    interface=endpoint_type,
                                    region_name=region_name)
        conf = {'auth_opts':
                {'backend': 'keystone',
                 'options': {'os_auth_token': self.auth_token,
                             'os_auth_url': self.auth_url,
                             'insecure': get_client_option('zaqar',
                                                           'insecure')}
                 }
                }
        self._zaqar = zaqarclient.Client(endpoint_url, conf=conf)
        return self._zaqar

    @exception.wrap_keystone_exception
    def neutron(self):
        """Return a cached Neutron (networking) client."""
        if self._neutron:
            return self._neutron

        endpoint_type = get_client_option('neutron', 'endpoint_type')
        region_name = get_client_option('neutron', 'region_name')
        endpoint_url = self.url_for(service_type='network',
                                    interface=endpoint_type,
                                    region_name=region_name)
        args = {
            'auth_url': self.auth_url,
            'endpoint_url': endpoint_url,
            'token': self.auth_token,
            'username': None,
            'password': None,
            'insecure': get_client_option('neutron', 'insecure'),
            'ca_cert': get_client_option('neutron', 'ca_cert')
        }
        self._neutron = neutronclient.Client('2.0', **args)
        return self._neutron

    @exception.wrap_keystone_exception
    def glance(self):
        """Return a cached Glance (image) client."""
        if self._glance:
            return self._glance

        args = {
            'token': self.auth_token,
        }
        endpoint_type = get_client_option('glance', 'endpoint_type')
        region_name = get_client_option('glance', 'region_name')
        endpoint = self.url_for(service_type='image',
                                interface=endpoint_type,
                                region_name=region_name)
        self._glance = glanceclient.Client('2', endpoint, **args)
        return self._glance

    @exception.wrap_keystone_exception
    def mistral(self):
        """Return a cached Mistral (workflow) client."""
        if self._mistral:
            return self._mistral

        args = {
            'auth_token': self.auth_token,
        }
        endpoint_type = get_client_option('mistral', 'endpoint_type')
        region_name = get_client_option('mistral', 'region_name')
        endpoint = self.url_for(service_type='workflow',
                                interface=endpoint_type,
                                region_name=region_name)
        self._mistral = mistralclient.client(mistral_url=endpoint, **args)
        return self._mistral

    @exception.wrap_keystone_exception
    def heat(self, username=None, password=None, token=None):
        """Return a cached Heat (orchestration) client.

        NOTE(review): the cached instance is returned regardless of the
        username/password/token arguments passed on later calls - only the
        first call's credentials take effect. Confirm callers expect this.
        """
        if self._heat:
            return self._heat

        if token:
            token_to_use = token
        else:
            token_to_use = self.auth_token

        endpoint_type = get_client_option('heat', 'endpoint_type')
        args = {
            'auth_url': self.auth_url,
            'token': token_to_use,
            'username': username,
            'password': password,
            'ca_file': get_client_option('heat', 'ca_file'),
            'cert_file': get_client_option('heat', 'cert_file'),
            'key_file': get_client_option('heat', 'key_file'),
            'insecure': get_client_option('heat', 'insecure')
        }

        region_name = get_client_option('heat', 'region_name')
        endpoint = self.url_for(service_type='orchestration',
                                interface=endpoint_type,
                                region_name=region_name)
        self._heat = heatclient.Client('1', endpoint, **args)

        return self._heat

    @exception.wrap_keystone_exception
    def swift(self):
        """Return a new Swift (object store) connection.

        Not caching swift connections because of range requests;
        check how glance_store uses swift client for a reference.
        """
        endpoint_type = get_client_option('swift', 'endpoint_type')
        region_name = get_client_option('swift', 'region_name')
        args = {
            'auth_version': '2.0',
            'preauthtoken': self.auth_token,
            'preauthurl': self.url_for(service_type='object-store',
                                       interface=endpoint_type,
                                       region_name=region_name),
            'os_options': {'endpoint_type': endpoint_type,
                           'region_name': region_name},
            'cacert': get_client_option('swift', 'cacert'),
            'insecure': get_client_option('swift', 'insecure')
        }
        return swiftclient.Connection(**args)
|
|
import os, json, atexit, copy
import matplotlib.pyplot as plt
import numpy as np
import scipy.io.wavfile as wavfile
from numpy import exp, pi
from mpl_toolkits.mplot3d import Axes3D
from math import radians, degrees, log
from musp import Location
class ASTF:
    """An angle-specific transfer function tied to a location.

    The heavy spectral data is produced lazily by ``astf_data_generator``
    (possibly loaded from disk) and cached in a shared DataContainer, while
    ``post_processor`` applies cheap location-dependent adjustments on every
    access - so only the raw, location-independent data is cached.
    """

    def __init__(self, astf_data_generator, location, post_processor=None, filename=None):
        self.location = location
        # Generator goes to get the data we want to save to disk (maybe from disk), and
        # post-processor does some lightweight manipulation of the loaded data before returning it.
        # (you wouldn't want to save angle-dependent transfer functions that have been scaled/
        # delayed for distance, right?)
        self.astf_data_generator = astf_data_generator
        self.post_processor = (lambda l,d,irl:(d,irl)) if post_processor is None else post_processor
        self.filename = filename
        self.data_c = ASTF.DataContainer()

    def generate_astf(self):
        """Return the post-processed (data, ir_length), generating and
        caching the raw data on first use."""
        if self.data_c.data is None:
            self.data_c.data, self.data_c.ir_length = self.astf_data_generator()
        return self.post_processor(self.location, self.data_c.data, self.data_c.ir_length)

    def with_post_processor(self, pp):
        """Return a shallow copy sharing this ASTF's cached data but using
        post-processor *pp*."""
        new_astf = copy.copy(self) # retain data container, generator
        new_astf.post_processor = pp
        return new_astf

    def has_data(self):
        return self.data_c.data is not None

    def data(self):
        return self.data_c.data

    def ir_length(self):
        return self.data_c.ir_length

    def __str__(self):
        return "ASTF @" + str(self.location) + " with generator " + str(self.astf_data_generator) + \
                (" from " + self.filename if self.filename is not None else '') + \
                (" impulse " + str(self.data_c.ir_length) + " samples long with data @" + \
                str(id(self.data_c.data)) if self.has_data() else '')

    class DataContainer:
        """Mutable holder so copies of an ASTF share one cached payload."""

        def __init__(self):
            self.data = None
            self.ir_length = None

        def __iter__(self):
            # BUG FIX: was ``yield data`` (a NameError at iteration time);
            # must yield the instance attributes so
            # tuple(container) == (data, ir_length).
            yield self.data
            yield self.ir_length
class AuralSpace(object):
    """Base class for spatial audio transfer-function providers.

    Subclasses implement _create_astf(location) and must define self.rate
    (sample rate in Hz) for the delay arithmetic below.
    """

    def astf_for_location(self, location):
        # Normalize to a Location and delegate to the subclass factory.
        return self._create_astf(Location(location))

    def apply_decays(self, astf_data, location, start_location=None):
        # zip() over the per-ear pair yields [(r,), (l,)], i.e. a (2, 1)
        # column that broadcasts across the (2, n) spectrum rows.
        # NOTE(review): relies on Python 2 zip() returning a list; under
        # Python 3 np.array(zip(...)) would not build the intended array.
        decays = np.array(zip(Location(location).decays_at_ears()))
        if start_location is not None:
            # Relative decay when the cached data was generated for a
            # different starting location.
            decays /= np.array(zip(start_location.decays_at_ears()))
        return astf_data * decays

    def correct_delays(self, astf_data, ir_length, location,
            max_delay_samples=None, start_location=None):
        # Per-ear delays in samples, as a (2, 1) column (see apply_decays).
        delays = np.array(zip(Location(location).delays_to_ears()))*self.rate
        if start_location is not None:
            if start_location == location:
                return astf_data, ir_length
            # neg delay shift ok if correctly cached
            delays -= np.array(zip(Location(start_location).delays_to_ears()))*self.rate
        if max_delay_samples is not None:
            # Softly compress positive delays so they never exceed
            # max_delay_samples (negative delays pass through unchanged).
            def compression_func(x):
                return np.where(x>0, x/(exp(x) + x), x)
            delay_fracs = delays/max_delay_samples
            delays = max_delay_samples*compression_func(delay_fracs)
        data_len = astf_data.shape[1]
        # astf_data holds rfft-style half-spectra; recover the time-domain length.
        overall_samples = (data_len - 1)*2
        # A shift of n is realized by a multiplication by exp(2pi*n*w/T) (but it can be fractional!)
        exp_coeff = -2j * pi / overall_samples
        transfer_func = exp(exp_coeff * delays * \
                np.tile(np.arange(data_len), (2, 1)))
        return astf_data * transfer_func, int(ir_length + max(*delays))
class EarDelayAuralSpace(AuralSpace):
    """Aural space modelling only inter-aural time delay and decay (no HRTF)."""
    def __init__(self, name, rate):
        self.name = name
        # Sample rate in Hz; used to convert ear delays (seconds) to samples.
        self.rate = rate
    def _create_astf(self, location):
        """Return an ASTF that lazily builds a pure delay/decay filter."""
        # use a relatively long block, within an order of a second
        min_block_samples = int(self.rate * .05)
        # use 20% more samples than needed for the maximum delay
        delayr, delayl = location.delays_to_ears()
        impulse_samples = int(max(delayr*self.rate, delayl*self.rate)*1.2)
        # complete astf will have minimum possible power of 2 length for efficient fft
        overall_samples = 2*2**int(log(min_block_samples + impulse_samples, 2))
        def edas_astf_generator():
            # NOTE(review): (overall_samples)/2 relies on Python 2 integer
            # division -- under py3 this would be a float shape and np.ones
            # would raise.
            tabula_rasa = np.ones((2, (overall_samples)/2 + 1))
            delayed, mod_ir_length = self.correct_delays(tabula_rasa, impulse_samples,
                location, max_delay_samples=(overall_samples - impulse_samples))
            decayed = self.apply_decays(delayed, location)
            return decayed, mod_ir_length
        return ASTF(edas_astf_generator, location)
class DiscreteAuralSpace(AuralSpace):
    """Aural space backed by a discrete set of ASTFs persisted in an on-disk
    cache under ~/.mu-sp/cache/<name>.

    May wrap another AuralSpace class (wrapped_as_class) that actually
    generates ASTFs; lookups then go through n-nearest selection and each
    result is retargeted to the requested location by a post processor.
    NOTE(review): Python 2 code (print statements, dict.iteritems).
    """
    cache_path = os.path.join(os.path.expanduser('~'), ".mu-sp", "cache")
    metadata_file_name = "meta.json"
    # JSON keys used inside the cache metadata file.
    json_loc_property = "loc"
    json_ir_length_property = "ir_len"
    def __init__(self, name, rate, wrapped_as_class=None, dist_metric=Location.cosine_distance,
            existing_cache_dirs=[]):
        # NOTE(review): existing_cache_dirs deliberately uses a shared
        # mutable default -- it acts as a process-wide registry so two
        # spaces cannot claim the same cache directory.
        self.name = name
        self.astfs = []
        self.wrapped_as = self if wrapped_as_class is None else wrapped_as_class("Discrete AS " +
            name + "-wrapped " + wrapped_as_class.__name__, rate)
        self.changed_metadata = False
        self.dist_metric = dist_metric
        self.rate = rate
        self.unique_cache_dir = os.path.join(DiscreteAuralSpace.cache_path, name)
        if self.unique_cache_dir in existing_cache_dirs:
            print "Ya just can't make two discrete aural spaces with the same cache directory, y'see"
            exit()
        existing_cache_dirs.append(self.unique_cache_dir)
        self.cache_metadata_file = os.path.join(self.unique_cache_dir,
            DiscreteAuralSpace.metadata_file_name)
        if os.path.exists(self.cache_metadata_file):
            with open(self.cache_metadata_file, 'r') as mdf:
                for filename, meta in json.load(mdf).iteritems():
                    ir_length = int(meta[DiscreteAuralSpace.json_ir_length_property])
                    filepath = os.path.join(self.unique_cache_dir, filename)
                    # f/irl bound as defaults to dodge the late-binding-closure trap.
                    astf_data_generator = lambda f=filepath, irl=ir_length: (np.load(f), irl)
                    x, y, z = meta[DiscreteAuralSpace.json_loc_property]
                    location = Location(float(x), float(y), float(z))
                    self.astfs.append(ASTF(astf_data_generator, location, filename))
        print len(self.astfs), "ASTFS ADDED RIGHT AT THE BEGINNING FROM CACHE!"
        atexit.register(self._save_out_cache)
    def astf_for_location(self, location):
        """Return the single nearest cached/generated ASTF for location."""
        astf, = self.wrapped_as.n_nearest_astfs(1, Location(location))
        return astf
    def n_nearest_locations(self, n, location):
        """Nearest known locations by self.dist_metric."""
        # default behavior only makes sense when whole cache is populated in advance
        nearest = sorted(self.astfs,
            key=lambda a: self.dist_metric(a.location, Location(location)))[:n]
        return [astf.location for astf in nearest]
    def n_nearest_astfs(self, n, location):
        """Return the n nearest ASTFs, creating (and remembering) any that
        are missing; each result carries a post processor retargeting it to
        the requested location."""
        locs = self.wrapped_as.n_nearest_locations(n, location)
        nearest = []
        for loc in locs[:]:
            astf = self._saved_astf_for_location(loc)
            if astf is None:
                new_astf = self.wrapped_as._create_astf(loc)
                self.astfs.append(new_astf)
                nearest.append(new_astf)
            else:
                nearest.append(astf)
        return [a.with_post_processor(self.wrapped_as._astf_post_processor(location))
            for a in nearest]
    def _astf_post_processor(self, destination_location):
        # default astf post processor applies shifts and decays assuming that the loaded astf
        # is at the standard distance; override if not
        def vanilla_post_processor(loc, data_from_cache, ir_len):
            filter_length = (data_from_cache.shape[1] - 1)*2
            delayed, mod_ir_len = self.correct_delays(data_from_cache, ir_len, destination_location,
                max_delay_samples=(filter_length - ir_len), start_location=loc)
            decayed = self.apply_decays(delayed, destination_location, start_location=loc)
            return (decayed, mod_ir_len)
        return vanilla_post_processor
    def _saved_astf_for_location(self, location):
        """Linear scan for an already-known ASTF at exactly this location."""
        for astf in self.astfs:
            if astf.location == location:
                return astf
        return None
    def _cache_name_for_astf(self, astf):
        """Cache file name derived from the ASTF's spherical coordinates."""
        (t, p), r = astf.location.spherical()
        return self.name + '_' + '_'.join(['%.3f'%c for c in [t, p, r]]) + ".astfdata"
    def _save_out_cache(self):
        """atexit hook: persist every generated ASTF plus the metadata JSON."""
        print "SAVING OUT THE CACHE! YAY! IT WORKED!"
        if not self.astfs:
            print "...but there's nothing to save. aw."
            return
        if not os.path.exists(self.unique_cache_dir):
            print "making new cache directory for aural space " + self.name
            os.mkdir(self.unique_cache_dir)
        meta_map = {}
        for astf in self.astfs:
            if not astf.has_data():
                # Never force generation just to save; skip unrealized ASTFs.
                continue
            filename = astf.filename if astf.filename else self.wrapped_as._cache_name_for_astf(astf)
            filepath = os.path.join(self.unique_cache_dir, filename)
            if not os.path.exists(filepath):
                with open(filepath, 'w+') as tf_file:
                    np.save(tf_file, astf.data())
            meta_map[filename] = {DiscreteAuralSpace.json_loc_property:
                tuple(c for c in astf.location),
                DiscreteAuralSpace.json_ir_length_property:
                astf.ir_length()}
        with open(self.cache_metadata_file, 'w') as mdf:
            json.dump(meta_map, mdf)
class DiscreteEarDelayAS(DiscreteAuralSpace, EarDelayAuralSpace):
    """Discretized ear-delay space: 100 fixed points spaced evenly around
    the horizontal plane at the standard distance."""
    from math import pi
    num_points = 100
    # NOTE(review): i-num_points/2 relies on Python 2 integer division for
    # the offset (100/2 == 50); azimuths span [-pi, pi).
    points = [Location((pi*2/num_points*(i-num_points/2), 0), Location.standard_distance)
        for i in range(num_points)]
    def n_nearest_locations(self, n, location):
        """Nearest fixed grid points by cosine distance (overrides the
        populate-in-advance default of DiscreteAuralSpace)."""
        return sorted(DiscreteEarDelayAS.points,
            key=lambda l:l.cosine_distance_to(Location(location)))[:n]
class KemarAuralSpace(DiscreteAuralSpace):
    """Discrete aural space loading KEMAR HRTF wav files from
    ~/.mu-sp/hrtf/kemar; each measured location is mirrored across the
    median plane so one file serves both half planes."""
    hrtf_dir = os.path.join(os.path.expanduser('~'), ".mu-sp", "hrtf", "kemar")
    # Average energy of the HRTF impulse responses, used for RMS normalization.
    hrtf_avg_energy = .6
    def __init__(self, name, rate):
        # init and load HRTFs from cache
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed -- fine while it stays a leaf class.
        super(self.__class__, self).__init__(name, rate)
        self.files_for_locs = {}
        for filename in os.listdir(KemarAuralSpace.hrtf_dir):
            # File names carry angles as "...H<elev>e<azimuth>a..." markers;
            # azimuth sign is flipped to this project's convention.
            iH = filename.index('H')
            ie = filename.index('e')
            ia = filename.index('a')
            elevation_deg, attitude_deg = float(filename[iH + 1:ie]), -float(filename[ie + 1:ia])
            loc = Location((radians(attitude_deg), radians(elevation_deg)),
                Location.standard_distance)
            mirror_loc = loc.right_half_plane()
            self.files_for_locs[loc.cache_tag()] = loc, filename
            self.files_for_locs[mirror_loc.cache_tag()] = mirror_loc, filename
        # build all remaining HRTFs
        for loc, filename in self.files_for_locs.values():
            if not self._saved_astf_for_location(loc):
                self.astfs.append(self._create_astf(loc))
    def _create_astf(self, location):
        """Return an ASTF whose generator lazily reads, normalizes and FFTs
        the matching HRTF wav file."""
        min_block_length = self.rate * .05
        loc, filename = self.files_for_locs[location.cache_tag()]
        if not loc == location:
            print "Something went terribly wrong."
            exit()
        path = os.path.join(KemarAuralSpace.hrtf_dir, filename)
        def kas_generate_astf():
            filerate, data = wavfile.read(path)
            raw_data = np.transpose(np.array(data))
            # 16-bit PCM -> floats in [-1, 1).
            unit_size_data = raw_data.astype(np.float) / (2**15)
            energy = KemarAuralSpace.hrtf_avg_energy
            impulse_data = unit_size_data/np.sqrt(energy) # normalize by RMS
            if not location.right_half_plane() == location:
                impulse_data = impulse_data[::-1,:] # flip left and right channels
            # complete astf will have minimum possible power of 2 length for efficient fft
            impulse_len = impulse_data.shape[1]
            overall_samples = 2*2**int(log(min_block_length + impulse_len, 2))
            hrtf_data = np.fft.rfft(np.hstack((impulse_data,
                np.zeros((2, overall_samples - impulse_len)))))
            return hrtf_data, impulse_data.shape[1]
        return ASTF(kas_generate_astf, location)
|
|
from django.contrib.auth.models import User
from django.db import transaction
from jsonrpc import jsonrpc_method
from alexia.api.decorators import manager_required
from alexia.api.exceptions import InvalidParamsError
from alexia.apps.billing.models import RfidCard
from alexia.auth.backends import RADIUS_BACKEND_NAME, SAML2_BACKEND_NAME
from ..common import format_rfidcard
from ..config import api_v1_site
@jsonrpc_method('rfid.list(radius_username=String) -> Array', site=api_v1_site, safe=True, authenticated=True)
@manager_required
def rfid_list(request, radius_username=None):
    """
    Retrieve registered RFID cards for the current selected organization.

    Required user level: Manager

    Provide radius_username to select only RFID cards registered by the
    provided user.  Returns an array of registered RFID cards; an unknown
    username yields an empty array.

    radius_username -- (optional) Username to search for.

    Example return value:
    [
        {
            "identifier": "02,98:76:54:32",
            "registered_at": "2014-09-21T14:16:06+00:00",
            "user": "s0000000"
        },
        {
            "identifier": "05,01:23:45:67:89:ab:cd",
            "registered_at": "2014-09-21T14:16:06+00:00",
            "user": "s0000019"
        }
    ]
    """
    cards = RfidCard.objects.filter(managed_by=request.organization)
    if radius_username is not None:
        # Resolve the user via SAML2 authentication data first, then RADIUS.
        owner = None
        for backend in (SAML2_BACKEND_NAME, RADIUS_BACKEND_NAME):
            try:
                owner = User.objects.get(authenticationdata__backend=backend,
                                         authenticationdata__username=radius_username)
                break
            except User.DoesNotExist:
                continue
        if owner is None:
            return []
        cards = cards.filter(user=owner)
    return [format_rfidcard(card) for card in cards.select_related('user')]
@jsonrpc_method('rfid.get(radius_username=String) -> Array', site=api_v1_site, safe=True, authenticated=True)
@manager_required
def rfid_get(request, radius_username):
    """
    Retrieve registered RFID cards for a specified user and current selected
    organization.

    Required user level: Manager

    Returns an array of RFID card identifiers.

    radius_username -- Username to search for.

    Example return value:
    [
        "02,98:76:54:32",
        "02,dd:ee:ff:00",
        "03,fe:dc:ba:98",
        "05,01:23:45:67:89:ab:cd"
    ]

    Raises error -32602 (Invalid params) if the username does not exist.
    """
    # Resolve the user: SAML2 authentication data first, then RADIUS.
    user = None
    for backend in (SAML2_BACKEND_NAME, RADIUS_BACKEND_NAME):
        try:
            user = User.objects.get(authenticationdata__backend=backend,
                                    authenticationdata__username=radius_username)
            break
        except User.DoesNotExist:
            continue
    if user is None:
        # Typo fix: message previously read 'does not exits'.
        raise InvalidParamsError('User with provided username does not exist')
    rfidcards = RfidCard.objects.filter(user=user, managed_by=request.organization)
    return [rfidcard.identifier for rfidcard in rfidcards]
@jsonrpc_method('rfid.add(radius_username=String, identifier=String) -> Object', site=api_v1_site, authenticated=True)
@manager_required
@transaction.atomic
def rfid_add(request, radius_username, identifier):
    """
    Add a new RFID card to the specified user.

    Required user level: Manager

    Returns the RFID card on success.

    radius_username -- Username to search for.
    identifier -- RFID card hardware identifier.

    Example return value:
    {
        "identifier": "02,98:76:54:32",
        "registered_at": "2014-09-21T14:16:06+00:00",
        "user": "s0000000"
    }

    Raises error -32602 (Invalid params) if the username does not exist.
    Raises error -32602 (Invalid params) if the RFID card already exists for this person.
    Raises error -32602 (Invalid params) if the RFID card is already registered by someone else.
    """
    # Resolve the user: SAML2 authentication data first, then RADIUS.
    user = None
    for backend in (SAML2_BACKEND_NAME, RADIUS_BACKEND_NAME):
        try:
            user = User.objects.get(authenticationdata__backend=backend,
                                    authenticationdata__username=radius_username)
            break
        except User.DoesNotExist:
            continue
    if user is None:
        # Typo fix: message previously read 'does not exits'.
        raise InvalidParamsError('User with provided username does not exist')
    # Lock matching card rows for the rest of the transaction.  Check this
    # user's ownership first (same precedence as the original nested flow),
    # then whether anyone else holds the identifier.
    if RfidCard.objects.select_for_update().filter(user=user, identifier=identifier).exists():
        raise InvalidParamsError('RFID card with provided identifier already exists for this person')
    if RfidCard.objects.select_for_update().filter(identifier=identifier).exists():
        raise InvalidParamsError('RFID card with provided identifier already registered by someone else')
    rfidcard = RfidCard(user=user, identifier=identifier, is_active=True)
    rfidcard.save()
    if request.organization not in rfidcard.managed_by.all().select_for_update():
        rfidcard.managed_by.add(request.organization)
        rfidcard.save()
    return format_rfidcard(rfidcard)
@jsonrpc_method('rfid.remove(radius_username=String, identifier=String) -> Nil', site=api_v1_site, authenticated=True)
@manager_required
@transaction.atomic
def rfid_remove(request, radius_username, identifier):
    """
    Remove a RFID card from the specified user.

    Required user level: Manager

    radius_username -- Username to search for.
    identifier -- RFID card hardware identifier.

    Raises error -32602 (Invalid params) if the username does not exist.
    Raises error -32602 (Invalid params) if the RFID card does not exist for this person/organization.
    """
    # Resolve the user: SAML2 authentication data first, then RADIUS.
    user = None
    for backend in (SAML2_BACKEND_NAME, RADIUS_BACKEND_NAME):
        try:
            user = User.objects.get(authenticationdata__backend=backend,
                                    authenticationdata__username=radius_username)
            break
        except User.DoesNotExist:
            continue
    if user is None:
        # Typo fix: message previously read 'does not exits'.
        raise InvalidParamsError('User with provided username does not exist')
    try:
        rfidcard = RfidCard.objects.select_for_update().get(user=user, identifier=identifier)
    except RfidCard.DoesNotExist:
        raise InvalidParamsError('RFID card not found')
    managed_by = rfidcard.managed_by.all().select_for_update()
    if request.organization not in managed_by:
        # This RFID card does not exist in this organization
        raise InvalidParamsError('RFID card not found')
    if len(managed_by) == 1:
        # Only this organization left: delete the card entirely.
        rfidcard.delete()
    else:
        rfidcard.managed_by.remove(request.organization)
|
|
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import ConfigParser
import copy
import gevent
import hashlib
import platform
import random
import signal
import socket
import time
from ConfigParser import NoOptionError
from buildinfo import build_info
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.sandesh_base import Sandesh, SandeshConfig, sandesh_global
from pysandesh.sandesh_logger import SandeshLogger
from sandesh.nodeinfo.cpuinfo.ttypes import *
from sandesh.nodeinfo.process_info.constants import ProcessStateNames
from sandesh.nodeinfo.process_info.ttypes import (ProcessInfo, ProcessState,
ProcessStatus)
from sandesh.nodeinfo.ttypes import *
from sandesh.supervisor_events.ttypes import *
from sandesh_common.vns.constants import (INSTANCE_ID_DEFAULT, Module2NodeType,
ModuleNames, NodeTypeNames,
ServiceHttpPortMap, UVENodeTypeNames)
from process_stat import ProcessStat
import utils
try:
from docker_process_manager import DockerProcessInfoManager
except Exception:
# there is no docker library. assumes that code runs not for microservices
DockerProcessInfoManager = None
if platform.system() == 'Windows':
from windows_sys_data import WindowsSysData
from windows_process_manager import WindowsProcessInfoManager
else:
from linux_sys_data import LinuxSysData
class EventManagerTypeInfo(object):
    """Static identity of one node-manager flavor: module/node type names,
    the UVE object table and any extra sandesh packages to load."""
    def __init__(self, module_type, object_table, sandesh_packages=[]):
        # NOTE(review): the mutable default for sandesh_packages is only
        # read, never mutated, so the shared-default pitfall does not bite.
        self._module_type = module_type
        self._module_name = ModuleNames[self._module_type]
        self._object_table = object_table
        self._node_type = Module2NodeType[self._module_type]
        self._node_type_name = NodeTypeNames[self._node_type]
        self._uve_node_type = UVENodeTypeNames[self._node_type]
        self._sandesh_packages = sandesh_packages
    # end __init__
# end class EventManagerTypeInfo
class EventManager(object):
group_names = []
process_state_db = {}
FAIL_STATUS_DUMMY = 0x1
FAIL_STATUS_DISK_SPACE = 0x2
FAIL_STATUS_SERVER_PORT = 0x4
FAIL_STATUS_NTP_SYNC = 0x8
FAIL_STATUS_DISK_SPACE_NA = 0x10
def __init__(self, config, type_info, unit_names, update_process_list=False):
self.config = config
self.type_info = type_info
self.max_cores = 4
self.max_old_cores = 3
self.max_new_cores = 1
self.all_core_file_list = []
self.tick_count = 0
self.fail_status_bits = 0
self.prev_fail_status_bits = 1
self.instance_id = INSTANCE_ID_DEFAULT
self.sandesh_instance = sandesh_global
self.curr_build_info = None
self.new_build_info = None
self.hostip = self.config.hostip
self.hostname = socket.getfqdn(self.hostip)
self.collector_chksum = 0
self.random_collectors = list()
if config.collectors:
config.collectors.sort()
self.collector_chksum = hashlib.md5("".join(config.collectors)).hexdigest()
self.random_collectors = random.sample(config.collectors, len(config.collectors))
ConnectionState.init(self.sandesh_instance, self.hostname,
self.type_info._module_name, self.instance_id,
staticmethod(ConnectionState.get_conn_state_cb),
NodeStatusUVE, NodeStatus, self.type_info._object_table,
self._get_process_state_cb)
self.sandesh_instance.init_generator(
self.type_info._module_name, self.hostname,
self.type_info._node_type_name, self.instance_id,
self.random_collectors, self.type_info._module_name,
ServiceHttpPortMap[self.type_info._module_name],
['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
config=SandeshConfig.from_parser_arguments(self.config))
self.sandesh_instance.set_logging_params(
enable_local_log=self.config.log_local,
category=self.config.log_category,
level=self.config.log_level,
file=self.config.log_file,
enable_syslog=self.config.use_syslog,
syslog_facility=self.config.syslog_facility)
self.logger = self.sandesh_instance.logger()
event_handlers = {}
event_handlers['PROCESS_STATE'] = self._event_process_state
event_handlers['PROCESS_COMMUNICATION'] = self._event_process_communication
event_handlers['PROCESS_LIST_UPDATE'] = self._update_current_processes
if platform.system() == 'Windows':
self.system_data = WindowsSysData()
self.process_info_manager = WindowsProcessInfoManager(event_handlers)
else:
gevent.signal(signal.SIGHUP, self.nodemgr_sighup_handler)
self.system_data = LinuxSysData(self.msg_log, self.config.corefile_path)
if DockerProcessInfoManager and (utils.is_running_in_docker()
or utils.is_running_in_kubepod()):
self.process_info_manager = DockerProcessInfoManager(
type_info._module_type, unit_names, event_handlers,
update_process_list)
else:
self.msg_log('Node manager could not detect process manager',
SandeshLevel.SYS_ERR)
exit(-1)
self.process_state_db = self._get_current_processes()
for group in self.process_state_db:
self._send_init_info(group)
def msg_log(self, msg, level):
self.logger.log(SandeshLogger.get_py_logger_level(level), msg)
def _get_process_name(self, process_info):
if process_info['name'] != process_info['group']:
process_name = process_info['group'] + ":" + process_info['name']
else:
process_name = process_info['name']
return process_name
# Get all the current processes in the node
def _get_current_processes(self):
# Add all current processes to make sure nothing misses the radar
process_state_db = {}
# list of all processes on the node is made here
all_processes = self.process_info_manager.get_all_processes()
self.msg_log("DBG: get_current_processes: '%s'" % all_processes,
SandeshLevel.SYS_DEBUG)
for proc_info in all_processes:
proc_name = self._get_process_name(proc_info)
proc_pid = int(proc_info['pid'])
stat = ProcessStat(proc_name, host_ip=self.hostip)
stat.process_state = proc_info['statename']
if 'start' in proc_info:
stat.start_time = str(proc_info['start'])
stat.start_count += 1
stat.pid = proc_pid
if stat.group not in self.group_names:
self.group_names.append(stat.group)
if not stat.group in process_state_db:
process_state_db[stat.group] = {}
process_state_db[stat.group][proc_name] = stat
return process_state_db
# In case the processes in the Node can change, update current processes
def _update_current_processes(self):
process_state_db = self._get_current_processes()
msg = ("DBG: update_current_processes: process_state_db='%s'"
% process_state_db)
self.msg_log(msg, SandeshLevel.SYS_DEBUG)
old_process_set = set(key for group in self.process_state_db
for key in self.process_state_db[group])
new_process_set = set(key for group in process_state_db
for key in process_state_db[group])
common_process_set = new_process_set.intersection(old_process_set)
added_process_set = new_process_set - common_process_set
deleted_process_set = old_process_set - common_process_set
for deleted_process in deleted_process_set:
self._delete_process_handler(deleted_process)
for added_process in added_process_set:
for group in process_state_db:
if added_process in process_state_db[group]:
self._add_process_handler(
added_process, process_state_db[group][added_process])
# process is deleted, send state & remove it from db
def _delete_process_handler(self, deleted_process):
for group in self.process_state_db:
if deleted_process in self.process_state_db[group]:
self.process_state_db[group][deleted_process].deleted = True
self.send_process_state_db([group])
del self.process_state_db[group][deleted_process]
if not self.process_state_db[group]:
del self.process_state_db[group]
return
# new process added, update db & send state
def _add_process_handler(self, added_process, process_info):
group_val = process_info.group
self.process_state_db[group_val][added_process] = process_info
self.send_process_state_db([group_val])
def _get_process_state_cb(self):
state, description = self._get_process_state(self.fail_status_bits)
return state, description
def _get_build_info(self):
# Retrieve build_info from package/rpm and cache it
if self.curr_build_info is not None:
return self.curr_build_info
pkg_version = self._get_package_version()
pkg_version_parts = pkg_version.split('-')
build_id = pkg_version_parts[0]
build_number = pkg_version_parts[1] if len(pkg_version_parts) > 1 else "unknown"
self.new_build_info = build_info + '"build-id" : "' + \
build_id + '", "build-number" : "' + \
build_number + '"}]}'
if (self.new_build_info != self.curr_build_info):
self.curr_build_info = self.new_build_info
return self.curr_build_info
def _update_process_core_file_list(self):
ret_value = False
corenames = self.system_data.get_corefiles()
process_state_db_tmp = copy.deepcopy(self.process_state_db)
for corename in corenames:
try:
exec_name = corename.split('.')[1]
except IndexError:
# Ignore the directories and the files that do not comply
# with the core pattern
continue
for group in self.process_state_db:
for key in self.process_state_db[group]:
if key.startswith(exec_name):
process_state_db_tmp[group][key].core_file_list.append(corename.rstrip())
for group in self.process_state_db:
for key in self.process_state_db[group]:
if set(process_state_db_tmp[group][key].core_file_list) != set(
self.process_state_db[group][key].core_file_list):
self.process_state_db[group][key].core_file_list = process_state_db_tmp[group][key].core_file_list
ret_value = True
return ret_value
def send_process_state_db(self, group_names):
name = self.hostname
for group in group_names:
process_infos = []
delete_status = True
for key in self.process_state_db[group]:
pstat = self.process_state_db[group][key]
process_info = ProcessInfo()
process_info.process_name = key
process_info.process_state = pstat.process_state
process_info.start_count = pstat.start_count
process_info.stop_count = pstat.stop_count
process_info.exit_count = pstat.exit_count
process_info.last_start_time = pstat.start_time
process_info.last_stop_time = pstat.stop_time
process_info.last_exit_time = pstat.exit_time
process_info.core_file_list = pstat.core_file_list
process_infos.append(process_info)
# in tor-agent case, we should use tor-agent name as uve key
name = pstat.name
if pstat.deleted == False:
delete_status = False
if not process_infos:
continue
# send node UVE
node_status = NodeStatus()
node_status.name = name
node_status.deleted = delete_status
node_status.process_info = process_infos
node_status.build_info = self._get_build_info()
node_status_uve = NodeStatusUVE(table=self.type_info._object_table,
data=node_status)
msg = ('send_process_state_db: Sending UVE: {}'.format(node_status_uve))
self.msg_log(msg, SandeshLevel.SYS_INFO)
node_status_uve.send()
def _send_process_state(self, process_info):
pname = self._get_process_name(process_info)
# update process stats
if pname in list(key for group in self.process_state_db for key in self.process_state_db[group]):
for group in self.process_state_db:
if pname in self.process_state_db[group]:
proc_stat = self.process_state_db[group][pname]
else:
proc_stat = ProcessStat(pname, host_ip=self.hostip)
pstate = process_info['state']
proc_stat.process_state = pstate
send_uve = False
if (pstate == 'PROCESS_STATE_RUNNING'):
proc_stat.start_count += 1
proc_stat.start_time = str(int(time.time() * 1000000))
send_uve = True
proc_stat.pid = int(process_info['pid'])
if (pstate == 'PROCESS_STATE_STOPPED'):
proc_stat.stop_count += 1
send_uve = True
proc_stat.stop_time = str(int(time.time() * 1000000))
proc_stat.last_exit_unexpected = False
proc_stat.last_cpu = None
proc_stat.last_time = 0
if (pstate == 'PROCESS_STATE_EXITED'):
proc_stat.exit_count += 1
send_uve = True
proc_stat.exit_time = str(int(time.time() * 1000000))
proc_stat.last_cpu = None
proc_stat.last_time = 0
if not process_info['expected']:
self.msg_log('%s with pid: %s exited abnormally' %
(pname, process_info['pid']), SandeshLevel.SYS_ERR)
proc_stat.last_exit_unexpected = True
# check for core file for this exit
corename = self.system_data.find_corefile(
"core.[A-Za-z]*." + process_info['pid'] + "*")
if ((corename is not None) and (len(corename.rstrip()) >= 1)):
self.msg_log('core file: %s' % (corename),
SandeshLevel.SYS_ERR)
# before adding to the core file list make
# sure that we do not have too many cores
self.msg_log('core_file_list: %s, max_cores: %d' %
(str(proc_stat.core_file_list), self.max_cores),
SandeshLevel.SYS_DEBUG)
if (len(proc_stat.core_file_list) >= self.max_cores):
# get rid of old cores
start = self.max_old_cores
end = len(proc_stat.core_file_list) - \
self.max_new_cores + 1
core_files_to_be_deleted = \
proc_stat.core_file_list[start:end]
self.system_data.remove_corefiles(core_files_to_be_deleted)
# now delete the cores from the list as well
del proc_stat.core_file_list[start:end]
# now add the new core to the core file list
proc_stat.core_file_list.append(corename.rstrip())
self.msg_log('# of cores for %s: %d' % (pname,
len(proc_stat.core_file_list)), SandeshLevel.SYS_DEBUG)
send_init_uve = False
# update process state database
if not proc_stat.group in self.process_state_db:
self.process_state_db[proc_stat.group] = {}
send_init_uve = True
self.process_state_db[proc_stat.group][pname] = proc_stat
if send_uve:
if (send_init_uve):
self._send_init_info(proc_stat.group)
self.send_process_state_db([proc_stat.group])
def send_nodemgr_process_status(self):
if self.prev_fail_status_bits == self.fail_status_bits:
return
self.prev_fail_status_bits = self.fail_status_bits
fail_status_bits = self.fail_status_bits
state, description = self._get_process_state(fail_status_bits)
conn_infos = ConnectionState._connection_map.values()
(cb_state, cb_description) = ConnectionState.get_conn_state_cb(conn_infos)
if (cb_state == ProcessState.NON_FUNCTIONAL):
state = ProcessState.NON_FUNCTIONAL
if description != '':
description += ' '
description += cb_description
process_status = ProcessStatus(
module_id=self.type_info._module_name, instance_id=self.instance_id,
state=ProcessStateNames[state], description=description)
process_status_list = []
process_status_list.append(process_status)
node_status = NodeStatus(name=self.hostname,
process_status=process_status_list)
node_status_uve = NodeStatusUVE(table=self.type_info._object_table,
data=node_status)
msg = ('send_nodemgr_process_status: Sending UVE:'
+ str(node_status_uve))
self.msg_log(msg, SandeshLevel.SYS_INFO)
node_status_uve.send()
def _get_package_version(self):
pkg_version = utils.get_package_version('contrail-nodemgr')
if pkg_version is None:
self.msg_log('Error getting %s package version' % (
'contrail-nodemgr'), SandeshLevel.SYS_ERR)
pkg_version = "unknown"
return pkg_version
def _send_init_info(self, group_name):
key = next(key for key in self.process_state_db[group_name])
# system_cpu_info
sys_cpu = SystemCpuInfo()
sys_cpu.num_socket = self.system_data.get_num_socket()
sys_cpu.num_cpu = self.system_data.get_num_cpu()
sys_cpu.num_core_per_socket = self.system_data.get_num_core_per_socket()
sys_cpu.num_thread_per_core = self.system_data.get_num_thread_per_core()
node_status = NodeStatus(
name=self.process_state_db[group_name][key].name,
system_cpu_info=sys_cpu,
build_info=self._get_build_info())
# installed/running package version
pkg_version = self._get_package_version()
node_status.installed_package_version = pkg_version
node_status.running_package_version = pkg_version
node_status_uve = NodeStatusUVE(table=self.type_info._object_table,
data=node_status)
node_status_uve.send()
def _get_group_processes_mem_cpu_usage(self, group_name):
process_mem_cpu_usage = {}
for key in self.process_state_db[group_name]:
pstat = self.process_state_db[group_name][key]
if pstat.process_state != 'PROCESS_STATE_RUNNING':
continue
mem_cpu_usage_data = (
self.process_info_manager.get_mem_cpu_usage_data(
pstat.pid, pstat.last_cpu, pstat.last_time))
process_mem_cpu = mem_cpu_usage_data.get_process_mem_cpu_info()
process_mem_cpu.__key = pstat.pname
process_mem_cpu_usage[process_mem_cpu.__key] = process_mem_cpu
pstat.last_cpu = mem_cpu_usage_data.last_cpu
pstat.last_time = mem_cpu_usage_data.last_time
return process_mem_cpu_usage
def _get_process_state(self, fail_status_bits):
if not fail_status_bits:
return ProcessState.FUNCTIONAL, ''
state = ProcessState.NON_FUNCTIONAL
description = self.get_failbits_nodespecific_desc(fail_status_bits)
if fail_status_bits & self.FAIL_STATUS_NTP_SYNC:
description.append("NTP state unsynchronized.")
return state, " ".join(description)
# method to implement in children
def get_failbits_nodespecific_desc(self, fail_status_bits):
return list()
def _event_process_state(self, process_info):
msg = ("DBG: event_process_state:" + process_info['name'] + ","
+ "group:" + process_info['group'] + "," + "state:"
+ process_info['state'])
self.msg_log(msg, SandeshLevel.SYS_DEBUG)
self._send_process_state(process_info)
def _event_process_communication(self, pdata):
flag_and_value = pdata.partition(":")
msg = ("DBG: event_process_communication: Flag:" + flag_and_value[0] +
" Value:" + flag_and_value[2])
self.msg_log(msg, SandeshLevel.SYS_DEBUG)
def _event_tick_60(self):
self.tick_count += 1
for group in self.process_state_db:
key = next(key for key in self.process_state_db[group])
# get disk usage info periodically
disk_usage_info = self.system_data.get_disk_usage()
# typical ntp sync time is about 5 min - first time,
# we scan only after 10 min
if self.tick_count >= 10:
if self.system_data.check_ntp_status():
self.fail_status_bits &= ~self.FAIL_STATUS_NTP_SYNC
else:
self.fail_status_bits |= self.FAIL_STATUS_NTP_SYNC
self.send_nodemgr_process_status()
if self._update_process_core_file_list():
self.send_process_state_db([group])
process_mem_cpu_usage = self._get_group_processes_mem_cpu_usage(group)
# get system mem/cpu usage
system_mem_usage = self.system_data.get_sys_mem_info(
self.type_info._uve_node_type)
system_cpu_usage = self.system_data.get_sys_cpu_info(
self.type_info._uve_node_type)
# send above encoded buffer
node_status = NodeStatus(
name=self.process_state_db[group][key].name,
disk_usage_info=disk_usage_info,
system_mem_usage=system_mem_usage,
system_cpu_usage=system_cpu_usage,
process_mem_cpu_usage=process_mem_cpu_usage)
# encode other core file
if self.system_data.update_all_core_file():
self.send_process_state_db(self.group_names)
node_status.all_core_file_list = self.all_core_file_list
node_status_uve = NodeStatusUVE(table=self.type_info._object_table,
data=node_status)
self.msg_log('DBG: event_tick_60: node_status=%s' % node_status,
SandeshLevel.SYS_DEBUG)
node_status_uve.send()
def send_init_data(self):
self.send_nodemgr_process_status()
self.send_process_state_db(self.group_names)
# can be overriden in children
def do_periodic_events(self):
self._event_tick_60()
def run_periodically(self, function, interval, *args, **kwargs):
while True:
before = time.time()
function(*args, **kwargs)
duration = time.time() - before
if duration < interval:
gevent.sleep(interval - duration)
else:
self.msg_log(
'function %s duration exceeded %f interval (took %f)'
% (function.__name__, interval, duration),
SandeshLevel.SYS_ERR)
def runforever(self):
    """Block forever, delegating the main loop to the process-info manager."""
    self.process_info_manager.runforever()
def nodemgr_sighup_handler(self):
    """SIGHUP handler: re-read the config file and, if the COLLECTOR
    server_list changed, re-point sandesh at a re-shuffled collector list.

    The md5 checksum of the sorted list is used purely as a cheap
    change detector, not for security.
    """
    collector_list = list()
    config = ConfigParser.SafeConfigParser()
    config.read([self.config.config_file_path])
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            collector_list = collector.split()
        except ConfigParser.NoOptionError:
            # Section present but no server_list option: keep empty list.
            pass
    # Sort so the checksum is order-independent.
    collector_list.sort()
    new_chksum = hashlib.md5("".join(collector_list)).hexdigest()
    if new_chksum != self.collector_chksum:
        self.collector_chksum = new_chksum
        # Randomize collector order for crude load spreading.
        self.random_collectors = random.sample(collector_list, len(collector_list))
        self.sandesh_instance.reconfig_collectors(self.random_collectors)
|
|
import csv
import logging
import pathlib
from optparse import Values
from typing import Iterator, List, NamedTuple, Optional, Tuple
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.metadata import BaseDistribution, get_default_environment
from pip._internal.utils.misc import write_output
logger = logging.getLogger(__name__)
class ShowCommand(Command):
    """
    Show information about one or more installed packages.

    The output is in RFC-compliant mail header format.
    """

    usage = """
      %prog [options] <package> ..."""
    ignore_require_venv = True

    def add_options(self) -> None:
        """Register the -f/--files flag and attach the option group."""
        opts = self.cmd_opts
        opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.parser.insert_option_group(0, opts)

    def run(self, options: Values, args: List[str]) -> int:
        """Look up each named package and print its metadata."""
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        matched = print_results(
            search_packages_info(args),
            list_files=options.files,
            verbose=options.verbose,
        )
        return SUCCESS if matched else ERROR
class _PackageInfo(NamedTuple):
    """Flat, display-ready snapshot of one installed distribution.

    Built by search_packages_info() and rendered by print_results().
    ``files`` is None when neither RECORD nor installed-files.txt exists
    for the distribution.
    """
    name: str
    version: str
    location: str
    requires: List[str]
    required_by: List[str]
    installer: str
    metadata_version: str
    classifiers: List[str]
    summary: str
    homepage: str
    author: str
    author_email: str
    license: str
    entry_points: List[str]
    files: Optional[List[str]]
def _covert_legacy_entry(entry: Tuple[str, ...], info: Tuple[str, ...]) -> str:
"""Convert a legacy installed-files.txt path into modern RECORD path.
The legacy format stores paths relative to the info directory, while the
modern format stores paths relative to the package root, e.g. the
site-packages directory.
:param entry: Path parts of the installed-files.txt entry.
:param info: Path parts of the egg-info directory relative to package root.
:returns: The converted entry.
For best compatibility with symlinks, this does not use ``abspath()`` or
``Path.resolve()``, but tries to work with path parts:
1. While ``entry`` starts with ``..``, remove the equal amounts of parts
from ``info``; if ``info`` is empty, start appending ``..`` instead.
2. Join the two directly.
"""
while entry and entry[0] == "..":
if not info or info[-1] == "..":
info += ("..",)
else:
info = info[:-1]
entry = entry[1:]
return str(pathlib.Path(*info, *entry))
def search_packages_info(query: List[str]) -> Iterator[_PackageInfo]:
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.

    Yields one _PackageInfo per queried name that is actually installed;
    missing names produce a single warning and are otherwise skipped.
    """
    env = get_default_environment()
    # Index installed distributions by canonical (normalized) name.
    installed = {
        dist.canonical_name: dist
        for dist in env.iter_distributions()
    }
    query_names = [canonicalize_name(name) for name in query]
    # Report the user-supplied (un-normalized) names that were not found.
    missing = sorted(
        [name for name, pkg in zip(query, query_names) if pkg not in installed]
    )
    if missing:
        logger.warning('Package(s) not found: %s', ', '.join(missing))

    def _get_requiring_packages(current_dist: BaseDistribution) -> List[str]:
        # Reverse-dependency scan: every installed dist whose dependency
        # list mentions current_dist.
        return [
            dist.metadata["Name"] or "UNKNOWN"
            for dist in installed.values()
            if current_dist.canonical_name in {
                canonicalize_name(d.name) for d in dist.iter_dependencies()
            }
        ]

    def _files_from_record(dist: BaseDistribution) -> Optional[Iterator[str]]:
        # Modern wheels: file list lives in RECORD (a CSV, first column path).
        try:
            text = dist.read_text('RECORD')
        except FileNotFoundError:
            return None
        # This extra Path-str cast normalizes entries.
        return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))

    def _files_from_legacy(dist: BaseDistribution) -> Optional[Iterator[str]]:
        # Legacy egg installs: installed-files.txt, paths relative to the
        # info directory; convert them to package-root-relative paths.
        try:
            text = dist.read_text('installed-files.txt')
        except FileNotFoundError:
            return None
        paths = (p for p in text.splitlines(keepends=False) if p)
        root = dist.location
        info = dist.info_directory
        if root is None or info is None:
            return paths
        try:
            info_rel = pathlib.Path(info).relative_to(root)
        except ValueError:  # info is not relative to root.
            return paths
        if not info_rel.parts:  # info *is* root.
            return paths
        return (
            _covert_legacy_entry(pathlib.Path(p).parts, info_rel.parts)
            for p in paths
        )

    for query_name in query_names:
        try:
            dist = installed[query_name]
        except KeyError:
            # Already warned about above; just skip.
            continue

        try:
            entry_points_text = dist.read_text('entry_points.txt')
            entry_points = entry_points_text.splitlines(keepends=False)
        except FileNotFoundError:
            entry_points = []

        # Prefer RECORD; fall back to the legacy file list.
        files_iter = _files_from_record(dist) or _files_from_legacy(dist)
        if files_iter is None:
            files: Optional[List[str]] = None
        else:
            files = sorted(files_iter)

        metadata = dist.metadata
        yield _PackageInfo(
            name=dist.raw_name,
            version=str(dist.version),
            location=dist.location or "",
            requires=[req.name for req in dist.iter_dependencies()],
            required_by=_get_requiring_packages(dist),
            installer=dist.installer,
            metadata_version=dist.metadata_version or "",
            classifiers=metadata.get_all("Classifier", []),
            summary=metadata.get("Summary", ""),
            homepage=metadata.get("Home-page", ""),
            author=metadata.get("Author", ""),
            author_email=metadata.get("Author-email", ""),
            license=metadata.get("License", ""),
            entry_points=entry_points,
            files=files,
        )
def print_results(
    distributions: Iterator[_PackageInfo],
    list_files: bool,
    verbose: bool,
) -> bool:
    """
    Render each distribution's info in "Header: value" mail-header form.

    Returns True if at least one distribution was printed.
    """
    found = False
    for index, dist in enumerate(distributions):
        found = True
        if index:
            write_output("---")
        # Fixed header section, always shown, in this exact order.
        for label, value in (
            ("Name", dist.name),
            ("Version", dist.version),
            ("Summary", dist.summary),
            ("Home-page", dist.homepage),
            ("Author", dist.author),
            ("Author-email", dist.author_email),
            ("License", dist.license),
            ("Location", dist.location),
            ("Requires", ', '.join(dist.requires)),
            ("Required-by", ', '.join(dist.required_by)),
        ):
            write_output(label + ": %s", value)
        if verbose:
            write_output("Metadata-Version: %s", dist.metadata_version)
            write_output("Installer: %s", dist.installer)
            write_output("Classifiers:")
            for classifier in dist.classifiers:
                write_output("  %s", classifier)
            write_output("Entry-points:")
            for entry in dist.entry_points:
                write_output("  %s", entry.strip())
        if list_files:
            write_output("Files:")
            if dist.files is None:
                write_output("Cannot locate RECORD or installed-files.txt")
            else:
                for line in dist.files:
                    write_output("  %s", line.strip())
    return found
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite"""
import os
try:
import sendfile
SENDFILE_SUPPORTED = True
except ImportError:
SENDFILE_SUPPORTED = False
import routes
import webob
from glance.api.middleware import context
from glance.api.v1 import router
import glance.common.client
from glance.registry.api import v1 as rserver
from glance.tests import utils
VERBOSE = False
DEBUG = False
class FakeRegistryConnection(object):
    """In-process stand-in for an httplib connection to the registry.

    Instead of opening a socket, it builds a WebOb request and dispatches
    it straight into the registry WSGI app.
    """

    def __init__(self, registry=None):
        self.registry = registry or rserver

    def __call__(self, *args, **kwargs):
        # NOTE(flaper87): This method takes
        # __init__'s place in the chain.
        return self

    def connect(self):
        # No real socket; always "succeeds".
        return True

    def close(self):
        return True

    def request(self, method, url, body=None, headers=None):
        # Normalize to a single leading slash and stage the request.
        self.req = webob.Request.blank("/" + url.lstrip("/"))
        self.req.method = method
        if headers:
            self.req.headers = headers
        if body:
            self.req.body = body

    def getresponse(self):
        app = self.registry.API(routes.Mapper())
        # NOTE(markwash): we need to pass through context auth information if
        # we have it.
        if 'X-Auth-Token' in self.req.headers:
            wrapped = utils.FakeAuthMiddleware(app)
        else:
            wrapped = context.UnauthenticatedContextMiddleware(app)
        response = self.req.get_response(wrapped)
        return utils.FakeHTTPResponse(status=response.status_int,
                                      headers=response.headers,
                                      data=response.body)
def stub_out_registry_and_store_server(stubs, base_dir, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so
    that a real Glance server does not need to be up and
    running

    NOTE(review): base_dir is accepted but unused in the visible body --
    confirm whether callers rely on the signature only.
    """
    class FakeSocket(object):
        # Minimal socket stand-in: only fileno() is ever consulted.
        def __init__(self, *args, **kwargs):
            pass

        def fileno(self):
            return 42

    class FakeSendFile(object):
        # Emulates os.sendfile by copying file bytes into the staged
        # request body and reporting how many bytes were appended.
        def __init__(self, req):
            self.req = req

        def sendfile(self, o, i, offset, nbytes):
            os.lseek(i, offset, os.SEEK_SET)
            prev_len = len(self.req.body)
            self.req.body += os.read(i, nbytes)
            return len(self.req.body) - prev_len

    class FakeGlanceConnection(object):
        # httplib-shaped connection that routes into the API WSGI app.
        def __init__(self, *args, **kwargs):
            self.sock = FakeSocket()
            self.stub_force_sendfile = kwargs.get('stub_force_sendfile',
                                                  SENDFILE_SUPPORTED)

        def connect(self):
            return True

        def close(self):
            return True

        def _clean_url(self, url):
            #TODO(bcwaldon): Fix the hack that strips off v1
            return url.replace('/v1', '', 1) if url.startswith('/v1') else url

        def putrequest(self, method, url):
            self.req = webob.Request.blank(self._clean_url(url))
            if self.stub_force_sendfile:
                fake_sendfile = FakeSendFile(self.req)
                stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile)
            self.req.method = method

        def putheader(self, key, value):
            self.req.headers[key] = value

        def endheaders(self):
            hl = [i.lower() for i in self.req.headers.keys()]
            assert not ('content-length' in hl and
                        'transfer-encoding' in hl), \
                'Content-Length and Transfer-Encoding are mutually exclusive'

        def send(self, data):
            # send() is called during chunked-transfer encoding, and
            # data is of the form %x\r\n%s\r\n. Strip off the %x and
            # only write the actual data in tests.
            self.req.body += data.split("\r\n")[1]

        def request(self, method, url, body=None, headers=None):
            self.req = webob.Request.blank(self._clean_url(url))
            self.req.method = method
            if headers:
                self.req.headers = headers
            if body:
                self.req.body = body

        def getresponse(self):
            mapper = routes.Mapper()
            api = context.UnauthenticatedContextMiddleware(router.API(mapper))
            res = self.req.get_response(api)

            # httplib.Response has a read() method...fake it out
            def fake_reader():
                return res.body

            setattr(res, 'read', fake_reader)
            return res

    def fake_get_connection_type(client):
        """
        Returns the proper connection type

        Implicitly returns None for any other host/port combination.
        """
        DEFAULT_REGISTRY_PORT = 9191
        DEFAULT_API_PORT = 9292

        if (client.port == DEFAULT_API_PORT and
                client.host == '0.0.0.0'):
            return FakeGlanceConnection
        elif (client.port == DEFAULT_REGISTRY_PORT and
                client.host == '0.0.0.0'):
            rserver = kwargs.get("registry", None)
            return FakeRegistryConnection(registry=rserver)

    def fake_image_iter(self):
        for i in self.source.app_iter:
            yield i

    def fake_sendable(self, body):
        # Honor the per-connection sendfile override when set; otherwise
        # defer to the original _sendable saved below.
        force = getattr(self, 'stub_force_sendfile', None)
        if force is None:
            return self._stub_orig_sendable(body)
        else:
            if force:
                assert glance.common.client.SENDFILE_SUPPORTED
            return force

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
    # Keep the real _sendable reachable for the None-force fallback above.
    setattr(glance.common.client.BaseClient, '_stub_orig_sendable',
            glance.common.client.BaseClient._sendable)
    stubs.Set(glance.common.client.BaseClient, '_sendable',
              fake_sendable)
def stub_out_registry_server(stubs, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 for testing so
    that a real Glance Registry server does not need to be up and
    running
    """
    def fake_get_connection_type(client):
        """
        Returns the proper connection type

        Implicitly returns None for any other host/port combination.
        NOTE(review): this uses kwargs.pop() where the sibling
        stub_out_registry_and_store_server uses kwargs.get() -- confirm
        whether consuming the key is intentional here.
        """
        DEFAULT_REGISTRY_PORT = 9191

        if (client.port == DEFAULT_REGISTRY_PORT and
                client.host == '0.0.0.0'):
            rserver = kwargs.pop("registry", None)
            return FakeRegistryConnection(registry=rserver)

    def fake_image_iter(self):
        for i in self.response.app_iter:
            yield i

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
|
|
# Copyright (C) 2007 - 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# In this file, I'm going to put most of the guts of the Web services,
# so that I can maintain a CGI script as I migrate to CherryPy.
# Utilities, to mimic getfirst and getlist for CGI forms.
import os, sys
def _getfirst(val):
if val is None:
return None
elif type(val) in (list, tuple):
return val[0]
else:
return val
def _getlist(val):
if val is None:
return None
elif type(val) in (list, tuple):
return val
else:
return [val]
import MAT
_jsonIO = MAT.DocumentIO.getDocumentIO('mat-json')
# For optionsFromCGI. There's probably a better
# way of doing this, but right now I don't want to
# fiddle with the rest of the code.
class FakeFieldStorage:
    """Dict-backed stand-in for cgi.FieldStorage, used by
    optionsFromCGI-style code paths.

    NOTE: the constructor mutates the dict it is handed (kw entries are
    merged in), matching the original behavior.
    """

    def __init__(self, d, **kw):
        self.dict = d
        self.dict.update(kw)

    def keys(self):
        return self.dict.keys()

    def getfirst(self, key, default=None):
        # Present -> first element (via _getfirst); absent -> default.
        if key in self.dict:
            return _getfirst(self.dict[key])
        return default

    def getlist(self, key, default=None):
        # Present -> list form (via _getlist); absent -> default.
        if key in self.dict:
            return _getlist(self.dict[key])
        return default
def _readFile(path):
fp = open(path, "r")
s = fp.read()
fp.close()
return s
def filePatternExpand(file, **kw):
    """Read the template at *file* and substitute placeholder tokens.

    Always substitutes PAT_YUI_DIRECTORY (the basename of the configured
    YUI JS library); additional token -> replacement pairs may be passed
    as keyword arguments and override/extend the defaults.
    """
    # Create and render a page.
    s = _readFile(file)
    # Now, we need to replace
    # PAT_YUI_DIRECTORY
    # at least.
    replDir = {"PAT_YUI_DIRECTORY": os.path.basename(MAT.Config.MATConfig["YUI_JS_LIB"])}
    replDir.update(kw)
    # Plain string replacement, one token at a time; order of a dict's
    # items is not significant here since tokens are distinct markers.
    for key, val in replDir.items():
        s = s.replace(key, val)
    return s
# NOTE: the same-machine check described here is implemented below in
# WebService._checkWorkspaceAccess(), which ensures the client and server
# are the same machine (unless remote workspace access is enabled). It
# APPEARS that when asking for localhost, both the remote and local
# addresses are 127.0.0.1.
class WSInfo:
    """Validated bundle of workspace-request state for a single operation.

    The constructor performs all validation up front; callers check
    self.success and self.error rather than catching exceptions. On
    success, self.workspace / self.folder / self.file / self.aggregator
    are populated as requested.
    """

    def __init__(self, svc,  # These two are provided internally.
                 checkFolder = False, checkFile = False,
                 # The rest of these are from the kw args of the operation.
                 workspace_key = None, workspace_dir = None,
                 read_only = None, folder = None, file = None,
                 workspace_search_dirs = None,
                 **kw):
        self.svc = svc
        self.file = None
        self.folder = None
        self.success = True
        self.error = None
        self.aggregator = None
        self.workspace = None
        self.wsDirSuffix = None
        # Keys.
        wsKey = _getfirst(workspace_key)
        wsDir = _getfirst(workspace_dir)
        # read_only arrives as a CGI string; only the literal "yes" enables it.
        self.readOnly = readOnly = _getfirst(read_only) == "yes"
        if not self.svc._checkWorkspaceAccess():
            # Who knows how we might have gotten here.
            self.success = False
            self.error = "Server and client are not on the same machine, and remote access is not enabled."
        elif (wsKey != self.svc.wsKey):
            self.success = False
            self.error = "Workspace key is incorrect"
        elif (wsDir is None) or (wsDir.strip() == ""):
            self.success = False
            self.error = "No workspace specified"
        elif (workspace_search_dirs is None) and (not os.path.isabs(wsDir)):
            # Without search dirs, the path must be absolute...
            self.success = False
            self.error = "Workspace directory path must be absolute."
        elif (workspace_search_dirs is not None) and os.path.isabs(wsDir):
            # ...and with search dirs, it must be relative.
            self.success = False
            self.error = "Workspace directory path cannot be absolute (workspace search dirs are specified)"
        else:
            try:
                # Now, try to find a workspace.
                if os.path.isabs(wsDir):
                    w = self.workspace = MAT.Workspace.Workspace(wsDir)
                    self.wsDirSuffix = self.workspace.dir
                else:
                    oneIsAbsolute = False
                    w = None
                    for d in workspace_search_dirs:
                        if os.path.isabs(d):
                            oneIsAbsolute = True
                            # Ugh. We have to be very paranoid here. If wsDir
                            # starts with .., you have to make sure you can't
                            # escape. The abspath of the search dir has to be
                            # a prefix of the abspath of the joined dir.
                            newD = os.path.abspath(os.path.join(d, wsDir))
                            if not newD.startswith(os.path.abspath(d)):
                                continue
                            try:
                                w = self.workspace = MAT.Workspace.Workspace(newD)
                                self.wsDirSuffix = wsDir
                                wsDir = newD
                                break
                            except:
                                # Nope, can't open it.
                                continue
                    if not oneIsAbsolute:
                        self.success = False
                        self.error = "None of the workspace container directories are absolute pathnames"
                    elif w is None:
                        self.success = False
                        self.error = "Couldn't find an openable workspace at path '%s' in any of the workspace container directories" % wsDir
                if self.success:
                    if not w.dirsAccessible(forWriting = not readOnly):
                        self.success = False
                        self.error = "Server does not have appropriate permissions on workspace."
                    else:
                        if checkFolder:
                            folder = self.folder = _getfirst(folder)
                            if not w.folders.has_key(folder):
                                self.success = False
                                self.error = "Unknown folder '%s'." % folder
                                return
                        if checkFile:
                            file = self.file = _getfirst(file)
                            if file is None:
                                self.success = False
                                self.error = "No file basename specified."
                                return
                        # Wrap the remaining CGI args for later option extraction.
                        form = FakeFieldStorage(kw.copy(), folder = folder, file = file)
                        self.aggregator = MAT.Operation.CGIOpArgumentAggregator(form)
                        self.params = kw
            except MAT.Workspace.WorkspaceError, e:
                self.success = False
                self.error = str(e)
            except IOError, e:
                self.success = False
                self.error = "File access error while opening workspace: " + str(e)
class WebService:
def __init__(self, remoteAddr, localAddr, wsKey, plugins = None,
             allowRemoteWorkspaceAccess = False):
    """Capture connection identity (remote/local address), the workspace
    key used to authorize workspace operations, and the plugin set."""
    self.remoteAddr = remoteAddr
    self.localAddr = localAddr
    self.allowRemoteWorkspaceAccess = allowRemoteWorkspaceAccess
    self.wsKey = wsKey
    # If this is specified, the plugins will be loaded, but only
    # those from this plugin dir basename will be returned.
    self.plugins = plugins
    if self.plugins is None:
        self.plugins = MAT.PluginMgr.LoadPlugins()
#
# Utility methods.
#
def _checkWorkspaceAccess(self):
    """Permit workspace operations for local clients, or for anyone when
    remote workspace access has been explicitly enabled."""
    if self.allowRemoteWorkspaceAccess:
        return True
    return self.remoteAddr == self.localAddr
# This utility is used both by load and by steps. The file we're
# loading may come from a demo, in which case I want to use the demo
# prefix rather than the task.
# This utility is used both by load and by steps. The file we're
# loading may come from a demo, in which case I want to use the demo
# prefix rather than the task.
def _checkTaskInformation(self, steps, input = None, input_file = None,
                          demo = None, task = None, workflow = None,
                          workflowCanBeNull = False,
                          **kw):
    """Validate task/workflow/input CGI arguments.

    Returns a 3-tuple (success, errorString, payload); payload is
    (plugins, pDir, TASK_OBJ, INPUT, WORKFLOW) on success, else None.
    """
    TASK = _getfirst(task)
    WORKFLOW = _getfirst(workflow)
    if (TASK is None) or ((WORKFLOW is None) and (not workflowCanBeNull)):
        return False, "app or workflow not specified", None
    plugins = self.plugins
    literalTaskObj = plugins.getTask(TASK)
    if literalTaskObj is None:
        return False, ("task %s not found" % TASK), None
    INPUT = _getfirst(input)
    if INPUT is None:
        # No inline input; fall back to reading input_file from disk.
        f = _getfirst(input_file)
        if f is not None:
            # It's either an absolute path, or it's somewhere underneath
            # the task or the demo, if there is a demo.
            if not os.path.isabs(f):
                if demo is not None:
                    root = plugins.getRecorded(demo)[0]
                else:
                    root = literalTaskObj.taskRoot
                f = os.path.join(root, f)
            try:
                INPUT = _readFile(f)
            except IOError, e:
                return False, str(e), None
    if INPUT is None:
        return False, "no input", None
    # Start with the nominal task to get the parameters. The
    # operational task we get later.
    form = FakeFieldStorage(kw.copy(), input = input, task = task, workflow = workflow)
    aggregator = MAT.Operation.CGIOpArgumentAggregator(form)
    literalTaskObj.addOptions(aggregator)
    pDir = aggregator.extract()
    try:
        TASK_OBJ = literalTaskObj.getTaskImplementation(WORKFLOW, steps, **pDir)
        if TASK_OBJ is None:
            return False, ("operational task for %s not found" % TASK), None
    except KeyError:
        return False, ("operational task for %s not found" % TASK), None
    except MAT.PluginMgr.PluginError, e:
        return False, ("operational task for %s not found: %s" % (TASK, str(e))), None
    return True, None, (plugins, pDir, TASK_OBJ, INPUT, WORKFLOW)
import re
# Matches the leading run of spaces on a traceback line, so the indent
# can be preserved when rendering to HTML below.
INITSPACE = re.compile("^[ ]+")

def _shortHTMLBacktrace(self, (t, val, tb)):
    """Render a sys.exc_info() triple as a compact HTML <div> backtrace.

    (Python 2 tuple-parameter syntax.)
    """
    finalList = []
    import traceback, cgi
    for s in traceback.format_tb(tb):
        s = cgi.escape(s)
        m = self.INITSPACE.match(s)
        if m is not None:
            # Re-emit the leading indent explicitly.
            # NOTE(review): this re-inserts plain spaces, which collapse in
            # HTML -- possibly this was once "&nbsp;"; confirm intent.
            s = (" " * m.end()) + s[m.end():]
        s = s.replace("\n", "<br>")
        finalList.append(s)
    return "<div>" + "\n".join(finalList) + "</div>"
def show_main_page(self, **kw):
    """Render the main workbench page from its template."""
    # In order to find the web template, we need to compute
    # the root from where we are, which is lib/mat/python/MAT
    # in the distribution. We want web/templates/.
    return self.show_page(os.path.join("web", "templates", "workbench_tpl.html"), **kw)
def show_demo_page(self, demo, **kw):
    """Render the demo page template, seeding the PAT_DEMO_* tokens from
    the given demo object (title, description, web dir, activities)."""
    from MAT import json
    kw.update(PAT_DEMO_TITLE = demo.name,
              PAT_DEMO_DESCRIPTION = demo.description,
              PAT_DEMO_NAME = demo.webDir,
              PAT_DEMO_CONFIGURATION = json.dumps(demo.activities))
    # In order to find the web template, we need to compute
    # the root from where we are, which is lib/mat/python/MAT
    # in the distribution. We want web/templates/.
    return self.show_page(os.path.join("web", "templates", "demo_tpl.html"), **kw)
def show_page(self, page, inline_js = True, tasksOfInterest = None, **kw):
    """Expand a page template, inlining all task JS/CSS plus standard tokens.

    NOTE(review): inline_js is accepted but not consulted in the visible
    body -- JS/CSS appear to be inlined unconditionally; confirm.
    """
    matDir = os.path.dirname(os.path.abspath(__file__))
    # Climb four levels: lib/mat/python/MAT -> distribution root.
    MAT_PKG_HOME = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(matDir))))
    # Now, we need to replace
    # PAT_TASK_JAVASCRIPT
    # PAT_TASK_CSS
    # PAT_TASKS_OF_INTEREST
    replDir = {"PAT_TASKS_OF_INTEREST": "null"}
    if tasksOfInterest is not None:
        from MAT import json
        replDir["PAT_TASKS_OF_INTEREST"] = json.dumps(tasksOfInterest)
    # The first few are in the settings; the last two are the contents of all the
    # files for all the tasks we know of.
    # Odd: PAT_ used to be MF_, but that worked for the second two
    # and not for the first, in CGI. Outside CGI, it worked fine.
    # No clue.
    plugins = self.plugins
    allJS = plugins.getJSFiles()
    allCSS = plugins.getCSSFiles()
    # print >> sys.stderr, "Plugging in JS files:", " ".join(allJS)
    # print >> sys.stderr, "Plugging in CSS files:", " ".join(allCSS)
    jsString = '<script type="text/javascript">\n' + "\n".join([_readFile(p) for p in allJS]) + "\n</script>"
    cssString = '<style type="text/css">\n' + "\n".join([_readFile(p) for p in allCSS]) + "\n</style>"
    replDir["PAT_TASK_JAVASCRIPT"] = jsString
    replDir["PAT_TASK_CSS"] = cssString
    replDir["PAT_MAT_VERSION"] = MAT.Config.MATConfig.getMATVersion()
    replDir.update(kw)
    # Create and render a page.
    return filePatternExpand(os.path.join(MAT_PKG_HOME, page), **replDir)
# Currently, input is a JSON string. Returns the contents of the file
# and the recommended filename.
def _computeSaveFilename(self, filename):
    """Derive a client-facing download filename: the basename of the
    supplied name, or the placeholder "foo.txt" when none was given."""
    fname = _getfirst(filename)
    if fname:
        return os.path.split(fname)[1]
    return "foo.txt"
def save(self, input = "", filename = None, out_type = None, task = None, **kw):
    """Convert a JSON rich document to the requested output format and
    return {"success", "bytes", "filename"} for download."""
    TASK = _getfirst(task)
    plugins = self.plugins
    literalTaskObj = plugins.getTask(TASK)
    # Rich JSON documents are ALWAYS saved as utf-8.
    INPUT = _getfirst(input)
    outType = _getfirst(out_type)
    # INPUT is a JSON rich document. If the out type is
    # mat-json, then just echo the input. Otherwise, digest it
    # and write out the appropriate format.
    INPUT = INPUT.decode('utf-8')
    if outType == "mat-json":
        return {"success": True, "bytes": INPUT.encode('utf-8'), "filename": self._computeSaveFilename(filename)}
    else:
        INPUT = _jsonIO.readFromUnicodeString(INPUT, taskSeed = literalTaskObj)
        return self._save(INPUT, outType, filename, literalTaskObj, **kw)
def _save(self, doc, outType, filename, literalTaskObj, **kw):
    """Serialize *doc* via the document-IO class for *outType*, applying
    any IO output options passed through as CGI keyword arguments."""
    # Convert the CGI keywords into useful keyword arguments.
    ioCls = MAT.DocumentIO.getDocumentIOClass(outType)
    form = FakeFieldStorage(kw.copy())
    aggregator = MAT.Operation.CGIOpArgumentAggregator(form)
    ioCls.addOutputOptions(aggregator)
    pDir = aggregator.extract()
    return {"success": True,
            "bytes": ioCls(task = literalTaskObj, **pDir).writeToByteSequence(doc, encoding = "utf-8"),
            "filename": self._computeSaveFilename(filename)}
# In order to save a reconciliation document, we need to first run it through its
# updates, and then return the document to the frontend for redisplay, and THEN
# the frontend can do a generic save.
# In order to save a reconciliation document, we need to first run it through its
# updates, and then return the document to the frontend for redisplay, and THEN
# the frontend can do a generic save.
def _updateAndReconcile(self, literalTaskObj, recDoc):
    """Apply pending segment updates and run the reconcile pass in place
    on *recDoc* (mutates the document; returns nothing)."""
    from MAT.ReconciliationPhase import HumanDecisionPhase
    # The annotator that the non-workspace stuff uses is "unknown human"
    pObj = HumanDecisionPhase(human_decision_user = "unknown human")
    pObj.updateSavedSegments(literalTaskObj, recDoc)
    vDict = recDoc._votesForSegments()
    # Reconcile. We don't need to check if it's all done.
    pObj.reconcile(recDoc, "unknown human", vDict)
def update_reconciliation_document(self, input = None, task = None, **kw):
    """Run reconciliation updates on the posted document and return it
    (re-encoded) for the frontend to redisplay."""
    TASK = _getfirst(task)
    plugins = self.plugins
    literalTaskObj = plugins.getTask(TASK)
    INPUT = _jsonIO.readFromUnicodeString(_getfirst(input).decode('utf-8'), taskSeed = literalTaskObj)
    # So now we have a reconciliation document.
    self._updateAndReconcile(literalTaskObj, INPUT)
    # Now, every segment in the reconciliation doc should be marked "to review".
    # NO! Only the segments which are human gold. The reason I have to do this
    # is that it's as if the document is being "reopened" - updateSavedSegments
    # clears out to_review.
    for seg in INPUT.getAnnotations(["SEGMENT"]):
        if seg["status"] == "human gold":
            seg["to_review"] = "yes"
    return {"success": True, "doc": _jsonIO.renderJSONObj(INPUT)}
# If it's not fully reconciled, this should barf, I think. Actually, I
# think export() should barf.
def export_reconciliation_doc(self, input = "", filename = None, out_type = None, task = None, for_save = False, **kw):
    """Export a reconciliation document to a plain document.

    With for_save true the exported document is serialized for download
    (via _save); otherwise it is returned as renderable JSON. A
    ReconciliationError (e.g. not fully reconciled) yields a failure dict.
    """
    from MAT.ReconciliationDocument import ReconciliationError
    TASK = _getfirst(task)
    plugins = self.plugins
    literalTaskObj = plugins.getTask(TASK)
    # Rich JSON documents are ALWAYS saved as utf-8.
    outType = _getfirst(out_type)
    recDoc = _jsonIO.readFromUnicodeString(_getfirst(input).decode('utf-8'), taskSeed = literalTaskObj)
    # Just in case. This should already have been done, but just in case.
    self._updateAndReconcile(literalTaskObj, recDoc)
    try:
        exportedDoc = recDoc.export(literalTaskObj);
        if for_save:
            return self._save(exportedDoc, outType, filename, literalTaskObj, **kw)
        else:
            return {"success": True, "doc": _jsonIO.renderJSONObj(exportedDoc)}
    except ReconciliationError, e:
        return {"success": False, "error": str(e)}
# Currently, log is a JSON string (not an object). Returns the
# contents of the CSV file.
def save_log(self, log = None, **kw):
    """Convert a JSON frontend event log into a downloadable CSV.

    Returns {"success", "bytes", "filename"}, the filename being
    timestamped log_YYYYmmdd_HH_MM_SS.csv.
    """
    # I'm going to do the log mangling here, because it's really not
    # relevant to anything else in the system. Not much; originally I
    # was doing some raw logging in the frontend and augmenting it
    # here, but that turned out to be unwieldy, so I moved all the
    # ugliness into the frontend, and here we just turn it into CSV.
    from MAT import json
    log = json.loads(_getfirst(log))
    import datetime
    fname = datetime.datetime.now().strftime("log_%Y%m%d_%H_%M_%S.csv")
    # The logs consist of a couple messages which the Yahoo logger itself
    # provides, but mostly ours. It's a list of hashes. Most of them have
    # details like gesture, file, etc., but some like log_start, etc., don't.
    # NOTE: the msg entries will only be objects for the log
    # elements we saved. We'll also be getting other stuff from
    # the log, which are just strings. We'll be skipping these.
    # NOTE(review): no explicit skip of string entries is visible below;
    # msg["ms"] would raise on a plain string -- confirm upstream filtering.
    convertedLogs = []
    headers = ["timestamp", "rel_seconds", "gesture", "file", "folder", "workspace", "window", "action"]
    extraHeaders = []
    startTime = None
    import time
    for msg in log:
        # Let's not deal with that awful overflow.
        # Let's do our calculations in ms, and then
        # move the decimal point.
        t = msg["ms"]
        if startTime is None:
            startTime = t
            diffTime = "0.0"
        else:
            # I want to ensure that the time is
            # consistently marked in ms.
            diffTime = "%04d" % (t - startTime,)
            diffTime = diffTime[:-3] + "." + diffTime[-3:]
        remainderStr = "%.03f" % (float(t) / 1000.0,)
        remainder = remainderStr[remainderStr.find("."):]
        # For some reason, Excel barfs on the time string when
        # it has a space in it.
        ts = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(float(t) / 1000.0)) + remainder
        # Now, get rid of the milliseconds.
        del msg["ms"]
        msg["timestamp"] = ts
        msg["rel_seconds"] = diffTime
        # Collect any column names beyond the fixed header set.
        for key in msg.keys():
            if (key not in headers) and (key not in extraHeaders):
                extraHeaders.append(key)
        convertedLogs.append(msg)
    extraHeaders.sort()
    # Prepend a header row mapping each column name to itself.
    hDict = dict([(a, a) for a in headers + extraHeaders])
    convertedLogs[0:0] = [hDict]
    import csv, cStringIO
    output = cStringIO.StringIO()
    # Empty-string restval fills missing cells; writerows emits everything.
    csv.DictWriter(output, headers + extraHeaders, "").writerows(convertedLogs)
    v = output.getvalue()
    output.close()
    return {"success": True, "bytes": v, "filename": fname}
# Returns an object suitable for converting to JSON.
def fetch_tasks(self, **kw):
    """Return [True, {metadata, workspace_access}] on success, or
    [False, <error string or HTML backtrace>] on failure."""
    try:
        plugins = self.plugins
        # Now, we serialize that information appropriately.
        dir = plugins.getCGIMetadata()
        if not dir:
            pNames = [False, "Error: no tasks found"]
        else:
            pNames = [True, {"metadata": dir, "workspace_access": self._checkWorkspaceAccess() } ]
    except Exception, e:
        # pNames = [False, str(e) + "\n" + "".join(traceback.format_tb(sys.exc_info()[2]))]
        pNames = [False, str(e) + "\n" + self._shortHTMLBacktrace(sys.exc_info())]
    return pNames
# "Loads" a file, in other words, converts it into an object suitable
# for rendering to JSON.
def load(self, file_type = None, encoding = None, **kw):
    """Decode an uploaded document via the requested document-IO class
    and return {"success", "error"[, "doc"]} for the frontend."""
    result = {"success": True,
              "error": None}
    success, errStr, res = self._checkTaskInformation([], file_type = file_type,
                                                      workflowCanBeNull = True,
                                                      encoding = encoding, **kw)
    if not success:
        result["success"] = False
        result["error"] = errStr
    else:
        ignore, ignore, TASK_OBJ, INPUT, WORKFLOW = res
        FILE_TYPE = _getfirst(file_type)
        ENCODING = _getfirst(encoding)
        # Convert the CGI keywords into useful keyword arguments.
        ioCls = MAT.DocumentIO.getDocumentIOClass(FILE_TYPE)
        form = FakeFieldStorage(kw.copy())
        aggregator = MAT.Operation.CGIOpArgumentAggregator(form)
        ioCls.addInputOptions(aggregator)
        pDir = aggregator.extract()
        ioObj = ioCls(encoding = ENCODING, task = TASK_OBJ, **pDir)
        try:
            INPUT = ioObj.readFromByteSequence(INPUT)
            result["doc"] = _jsonIO.renderJSONObj(INPUT)
        except (MAT.Document.LoadError, MAT.Annotation.AnnotationError), e:
            result["success"] = False
            result["error"] = str(e)
        except LookupError, e:
            # e.g. an unknown encoding name.
            result["success"] = False
            result["error"] = str(e)
    return result
# steps goes forward, undo_through goes backward. They have almost
# the same signature and procedure, except steps has steps and
# undo_through has undo_through. Duh. undo_through won't call
# setSuccess.
def steps(self, steps = None, undo_through = None, **kw):
    """Run the named workflow steps forward; returns {error, errorStep,
    successes} with each success's document rendered to JSON."""
    OutputObj = {"error": None,
                 "errorStep": None,
                 "successes": []}
    STEPS = []
    v = _getfirst(steps)
    if v:
        # Comma-separated step list from the CGI argument.
        STEPS = v.split(",")
    UNDO_THROUGH = None
    INPUT = self._stepsCore(OutputObj, STEPS, UNDO_THROUGH, **kw)
    # Make sure all the annotated documents are encoded.
    for entry in OutputObj["successes"]:
        if isinstance(entry["val"], MAT.Document.AnnotatedDoc):
            entry["val"] = _jsonIO.renderJSONObj(entry["val"])
    return OutputObj
def undo_through(self, steps = None, undo_through = None, **kw):
    """Undo workflow steps back through *undo_through*; returns {error,
    errorStep, stepsUndone, doc} with the modified document rendered."""
    OutputObj = {"error": None,
                 "errorStep": None,
                 "stepsUndone": []}
    STEPS = []
    UNDO_THROUGH = _getfirst(undo_through)
    INPUT = self._stepsCore(OutputObj, STEPS, UNDO_THROUGH, **kw)
    # Insert the modified document.
    OutputObj["doc"] = _jsonIO.renderJSONObj(INPUT)
    return OutputObj
def _stepsCore(self, outputObj, steps, undoThrough, **kw):
    """Shared engine driver for steps() and undo_through().

    Mutates *outputObj* in place (error/errorStep plus either successes
    or stepsUndone, depending on which engine callback fires) and
    returns the (possibly processed) document, or None on early failure.
    """
    STEPS = steps
    UNDO_THROUGH = undoThrough
    OutputObj = outputObj

    def setError(obj, err, step):
        obj["error"] = err
        obj["errorStep"] = step

    LoadFailed = False
    success, errStr, res = self._checkTaskInformation(STEPS, **kw)
    if not success:
        setError(OutputObj, errStr, "[init]")
        LoadFailed = True
        INPUT = None
    if not LoadFailed:
        plugins, pDir, TASK_OBJ, INPUT, WORKFLOW = res
        # Here's the output format. It's a hash of three elements: error (None if
        # there's no error), errorStep (None if there's no error), and
        # a list of success hashes, which have a val and steps.
        # See OutputObj above. An error always terminates the processing,
        # so on the client, you process the successes and then the error.
        # The steps should be in order of execution, and so should the
        # successes. It's not EXACTLY enforced.
        from MAT.ToolChain import MATEngine

        class CGIMATEngine(MATEngine):
            # Ignore the possibility of batch processing for the moment.

            def __init__(self, oObj, *args, **kw):
                # oObj is the shared output dict mutated by the callbacks.
                self.oObj = oObj
                MATEngine.__init__(self, *args, **kw)

            def ReportStepResult(self, stepObj, fname, iData):
                stepName = stepObj.stepName
                # Errors are raised if the step isn't successful.
                # If the step is a multi-step, the steps it's a
                # proxy for must also be reported; otherwise
                # the front end won't capture that those "true" steps
                # were created, and we'll encounter a bug where
                # you can't undo the multistep unless there's been
                # an explicit special multistep undo class introduced.
                # But this is only necessary for the forward
                # direction. Well, maybe not.
                steps = [stepName]
                if isinstance(stepObj, MAT.PluginMgr.MultiStep):
                    steps += [p.stepName for p in stepObj.proxies]
                obj = self.oObj
                # Merge into an existing success entry for the same
                # document (identity comparison), else append a new one.
                for entry in obj["successes"]:
                    if iData is entry["val"]:
                        entry["steps"] += steps
                        return
                obj["successes"].append({"val": iData, "steps": steps})

            def ReportBatchUndoStepResult(self, stepObj, iDataPairs):
                steps = [stepObj.stepName]
                if isinstance(stepObj, MAT.PluginMgr.MultiStep):
                    steps += [p.stepName for p in stepObj.proxies]
                self.oObj["stepsUndone"] += steps

        try:
            INPUT = _jsonIO.readFromByteSequence(INPUT, taskSeed = TASK_OBJ)
        except MAT.Document.LoadError, e:
            LoadFailed = True
            setError(OutputObj, str(e), "[init]")
    if not LoadFailed:
        try:
            engine = CGIMATEngine(OutputObj, taskObj = TASK_OBJ,
                                  workflow = WORKFLOW)
            engine.RunDataPairs([("<cgi>", INPUT)], steps = STEPS[:],
                                pluginDir = plugins, undoThrough = UNDO_THROUGH, **pDir)
        except MAT.Error.MATError, e:
            # Normalize the engine's error string for display.
            if e.errstr == "":
                errstr = "<no information>"
            elif e.errstr is None:
                errstr = "<unknown>"
            else:
                errstr = str(e.errstr)
            #import traceback
            #errstr = errstr + traceback.format_exc()
            setError(OutputObj, errstr, e.phase)
    return INPUT
    def document_reconciliation(self, **kw):
        """Build a reconciliation document from a list of input documents.

        The input payload is a JSON LIST of MAT-JSON documents. Each
        document is segmented if necessary, every segment is marked
        "human gold" with a per-document annotator name, and the
        resulting reconciliation document is returned (JSON-rendered)
        under result["doc"] with "to review" flags on the gold segments.
        """
        result = {"success": True,
                  "error": None}
        success, errStr, res = self._checkTaskInformation([], workflowCanBeNull = True, **kw)
        if not success:
            result["success"] = False
            result["error"] = errStr
        else:
            plugins, pDir, TASK_OBJ, INPUT, WORKFLOW = res
            # INPUT is a string which is a LIST of document JSON objects.
            from MAT import json
            docs = None
            try:
                docs = []
                for d in json.loads(INPUT.decode('utf-8')):
                    doc = TASK_OBJ.newDocument()
                    docs.append(doc)
                    _jsonIO._deserializeFromJSON(d, doc)
            except MAT.Document.LoadError, e:
                result["success"] = False
                result["error"] = str(e)
            if docs is not None:
                # So here, what we do is create a reconciliation document. The issue with
                # this is that we need to figure out which portions of the incoming
                # documents should be considered "gold". We may want an option to
                # preserve the incoming segmentation, and otherwise just assign a single
                # document-size segment with the document itself as the annotator.
                # We'll have two options to save: either to save this directly as
                # a reconciliation document, or to export it as a reconciled document.
                # The other issue is what happens when we load - the reconciliation document
                # should automatically open a reconciliation pane, which means that
                # we'd need to deal with the panes in the load callback, rather than
                # in the load prep. But otherwise, how do I load a reconciliation document?
                # I'd need a separate menu item. Hmmm.
                from MAT.ReconciliationDocument import ReconciliationDoc
                # Preprocess the documents. In this case, all the documents
                # must be marked human gold, and the annotator should be
                # the document itself (named "doc1", "doc2", ...).
                i = 1
                wholeZoneStep = None
                for doc in docs:
                    annotator = "doc" + str(i)
                    i += 1
                    segs = doc.getAnnotations(["SEGMENT"])
                    if not segs:
                        zones = doc.getAnnotations(TASK_OBJ.getAnnotationTypesByCategory("zone"))
                        if zones:
                            # Segment it: one gold SEGMENT per existing zone.
                            for z in zones:
                                doc.createAnnotation(z.start, z.end, "SEGMENT",
                                                     {"annotator": annotator, "status": "human gold"})
                        else:
                            # If there are no zones and no segments, then make one big zone and segment.
                            # The zoning step is created lazily and reused across documents.
                            if not wholeZoneStep:
                                wholeZoneStep = MAT.PluginMgr.WholeZoneStep("zone", TASK_OBJ, None)
                            wholeZoneStep.do(doc)
                            for seg in doc.getAnnotations(["SEGMENT"]):
                                seg["annotator"] = annotator
                                seg["status"] = "human gold"
                    else:
                        # Existing segmentation: just retag it as this document's gold.
                        for seg in segs:
                            seg["annotator"] = annotator
                            seg["status"] = "human gold"
                recDoc = ReconciliationDoc.generateReconciliationDocument(TASK_OBJ, docs, verbose = None)
                # Now, every segment in the reconciliation doc should be marked "to review".
                # NO! Only the segments which are human gold.
                for seg in recDoc.getAnnotations(["SEGMENT"]):
                    if seg["status"] == "human gold":
                        seg["to_review"] = "yes"
                result["doc"] = _jsonIO.renderJSONObj(recDoc)
        return result
    def document_comparison(self, labels = None, **kw):
        """Build a comparison document from a list of input documents.

        The input payload is a JSON LIST of MAT-JSON documents; the
        first document is the pivot and the rest are compared against
        it. Optional labels supply the pivot label (labels[0]) and the
        labels of the other documents (labels[1:]). Returns the
        JSON-rendered comparison document under result["doc"].
        """
        result = {"success": True,
                  "error": None}
        success, errStr, res = self._checkTaskInformation([], workflowCanBeNull = True, **kw)
        if not success:
            result["success"] = False
            result["error"] = errStr
        else:
            plugins, pDir, TASK_OBJ, INPUT, WORKFLOW = res
            # INPUT is a string which is a LIST of document JSON objects.
            from MAT import json
            docs = None
            try:
                docs = []
                for d in json.loads(INPUT.decode('utf-8')):
                    doc = TASK_OBJ.newDocument()
                    docs.append(doc)
                    _jsonIO._deserializeFromJSON(d, doc)
            except MAT.Document.LoadError, e:
                result["success"] = False
                result["error"] = str(e)
            if docs is not None:
                # So here, what we do is create a comparison document.
                from MAT.ComparisonDocument import generateComparisonDocument
                pivotLabel = None
                otherLabels = None
                if labels:
                    pivotLabel = labels[0]
                    otherLabels = labels[1:]
                try:
                    compDoc = generateComparisonDocument(TASK_OBJ, docs[0], docs[1:],
                                                         pivotLabel = pivotLabel, otherLabels = otherLabels)
                    result["doc"] = _jsonIO.renderJSONObj(compDoc)
                except MAT.Pair.PairingError, e:
                    result["success"] = False
                    result["error"] = str(e)
        return result
# Now, the workspace stuff. I don't want to keep duplicating code,
# so I'm going to refactor this so that the checks are progressive.
# First, we need to check the workspace. If that passes, we may need
# to check the folder, and if that passes, we may need to check the
# file. Then there's creating the appropriate field storage.
def open_workspace(self, user = None, **kw):
wsInfo = WSInfo(self, **kw)
if not wsInfo.success:
result = {"success": False,
"error": wsInfo.error}
else:
# First, we have to check if the workspace is being
# opened with a user. If it's being opened read-only,
# you don't need one, but if not, you do, and the
# user has to be registered. I check it here and not in
# WSInfo because the logic is dealt with separately
# when a document is opened.
if user:
user = user.strip()
if user and (not wsInfo.workspace.getDB().userIsRegistered(user)):
result = {"success": False,
"error": "User '%s' is not registered in workspace" % user}
elif (not user) and (not wsInfo.readOnly):
# Can omit a user only if it's read-only.
result = {"success": False,
"error": "No user provided for writeable workspace"}
else:
# We need to notify the frontend of the appropriate
# display config, because it turns out that we're
# not going down to the leaves here. In order to make
# this work, I've imposed a restriction that the display
# config can't be reset within the scope of a visible
# task.
result = {"success": True,
"workspace_dir": wsInfo.wsDirSuffix,
"logging_enabled": wsInfo.workspace.loggingEnabled,
"task": wsInfo.workspace.task.name}
return result
    def list_workspace_folder(self, **kw):
        """List the files in a workspace folder.

        Delegates to the workspace "list" operation and returns its
        web-formatted result; workspace errors are converted into a
        {"success": False, "error": ...} hash.
        """
        wsInfo = WSInfo(self, checkFolder = True, **kw)
        if not wsInfo.success:
            result = {"success": False,
                      "error": wsInfo.error}
        else:
            folder = wsInfo.folder
            try:
                result = wsInfo.workspace.runOperation("list", (folder,), fromCmdline = False,
                                                       resultFormat = MAT.Workspace.WEB_RESULT)
            except MAT.Workspace.WorkspaceError, e:
                result = {"success": False,
                          "error": str(e)}
        return result
    def open_workspace_file(self, **kw):
        """Open a single file in a workspace folder.

        Delegates to the workspace "open_file" operation, honoring the
        read-only flag and extra parameters collected by WSInfo;
        workspace errors are converted into a failure hash.
        """
        wsInfo = WSInfo(self, checkFolder = True, checkFile = True, **kw)
        if not wsInfo.success:
            result = {"success": False,
                      "error": wsInfo.error}
        else:
            # Get a document.
            folder = wsInfo.folder
            file = wsInfo.file
            try:
                result = wsInfo.workspace.runOperation("open_file", (folder, file),
                                                       fromCmdline = False,
                                                       resultFormat = MAT.Workspace.WEB_RESULT,
                                                       aggregator = wsInfo.aggregator,
                                                       read_only = wsInfo.readOnly,
                                                       **wsInfo.params)
            except MAT.Workspace.WorkspaceError, e:
                result = {"success": False,
                          "error": str(e)}
        return result
    # Doc here is a MAT-JSON document. So the encoding argument
    # is irrelevant.
    def import_into_workspace(self, doc = None, **kw):
        """Import a MAT-JSON document (UTF-8 bytes) into a workspace folder.

        The document content is written to a temporary directory under
        its target basename and then imported via the workspace
        "import" operation. Returns the operation's web result, or a
        failure hash on error.
        """
        wsInfo = WSInfo(self, checkFolder = True, checkFile = True, **kw)
        if not wsInfo.success:
            return {"success": False,
                    "error": wsInfo.error}
        # This is a bit awful, since I'm making a temporary
        # directory just so I can cache this file, because the
        # logic of the workspace file import would be awfully
        # hard to unwind. But whatever.
        # Get a document.
        file = wsInfo.file
        folder = wsInfo.folder
        doc = _getfirst(doc)
        if doc is None:
            return {"success": False,
                    "error": "No document content specified."}
        # The tmpdir context manager cleans up the scratch file for us.
        with MAT.ExecutionContext.Tmpdir(preserveTempfiles = False) as tmpDir:
            import codecs
            # The cleanest way to do this is to create a temp directory,
            # save the file under the basename, and then import it into
            # this folder.
            outPath = os.path.join(tmpDir, file)
            # It's already a UTF-8 encoded byte sequence.
            fp = codecs.open(outPath, "w", "utf-8")
            fp.write(doc.decode("utf-8"))
            fp.close()
            from MAT import json
            try:
                return wsInfo.workspace.runOperation("import", (folder, outPath),
                                                     aggregator = wsInfo.aggregator,
                                                     resultFormat = MAT.Workspace.WEB_RESULT,
                                                     fromCmdline = False,
                                                     **wsInfo.params)
            except MAT.Workspace.WorkspaceError, e:
                return {"success": False,
                        "error": str(e)}
    # This is kind of hideous. My only options for ordered arguments, which
    # the toplevel workspace operation has, is to have either a JSON-encoded list
    # object or to do arg1, etc...
    # This result may contain:
    # affected_folders, target, doc (import)
    # files (list)
    # doc (open)
    def do_toplevel_workspace_operation(self, ws_operation = None, **kw):
        """Run a named toplevel workspace operation.

        Positional arguments arrive as numbered CGI parameters arg1,
        arg2, ... which are collected in order (and removed from the
        keyword params) before being passed to the operation.
        """
        wsInfo = WSInfo(self, **kw)
        if not wsInfo.success:
            return {"success": False,
                    "error": wsInfo.error}
        else:
            try:
                wsOperation = _getfirst(ws_operation)
                w = wsInfo.workspace
                args = []
                i = 1
                # Gather arg1, arg2, ... until the first gap.
                while True:
                    v = wsInfo.params.get("arg"+str(i))
                    if v is None:
                        break
                    args.append(v)
                    del wsInfo.params["arg"+str(i)]
                    i += 1
                return w.runOperation(wsOperation, args, aggregator = wsInfo.aggregator,
                                      resultFormat = MAT.Workspace.WEB_RESULT, fromCmdline = False,
                                      **wsInfo.params)
            except (MAT.Workspace.WorkspaceError, MAT.Operation.OperationError), e:
                return {"success": False,
                        "error": str(e)}
            except MAT.ToolChain.ConfigurationError, (engine, e):
                return {"success": False,
                        "error": str(e)}
    def do_workspace_operation(self, ws_operation = None, **kw):
        """Run a named workspace folder operation on a single file.

        Wraps runFolderOperation for the folder/file identified by
        WSInfo, converting workspace, operation and configuration
        errors into a failure hash.
        """
        wsInfo = WSInfo(self, checkFolder = True, checkFile = True, **kw)
        if not wsInfo.success:
            result = {"success": False,
                      "error": wsInfo.error}
        else:
            try:
                wsOperation = _getfirst(ws_operation)
                basenames = [wsInfo.file]
                folder = wsInfo.folder
                w = wsInfo.workspace
                result = w.runFolderOperation(folder, wsOperation, aggregator = wsInfo.aggregator,
                                              basenames = basenames,
                                              resultFormat = MAT.Workspace.WEB_RESULT,
                                              **wsInfo.params)
            except (MAT.Workspace.WorkspaceError, MAT.Operation.OperationError), e:
                result = {"success": False,
                          "error": str(e)}
            except MAT.ToolChain.ConfigurationError, (engine, e):
                result = {"success": False,
                          "error": str(e)}
        return result
    # Let's do an arbitrarily defined operation in a task.
    def do_task_operation(self, task = None, task_operation = None, **kw):
        """Invoke a task-defined web operation by name.

        Looks up the task, verifies the named attribute exists, is a
        method, and is explicitly flagged with web_operation = True
        (so arbitrary methods can't be called over the web), then
        calls it with the remaining keyword parameters.
        """
        TASK = _getfirst(task)
        if TASK is None:
            return {"success": False, "error": "task not specified"}
        task_operation = _getfirst(task_operation)
        if task_operation is None:
            return {"success": False, "error": "task operation not specified"}
        plugins = self.plugins
        literalTaskObj = plugins.getTask(TASK)
        if literalTaskObj is None:
            return {"success": False,
                    "error": "task %s not found" % TASK}
        # Now, let's see if we've defined the web operation.
        if not hasattr(literalTaskObj, task_operation):
            return {"success": False,
                    "error": "task object does not have the %s attribute" % task_operation}
        meth = getattr(literalTaskObj, task_operation)
        import types
        if type(meth) is not types.MethodType:
            return {"success": False,
                    "error": "task object attribute %s is not a method" % task_operation}
        # Only methods explicitly marked as web operations may be invoked.
        if not getattr(meth, "web_operation", False):
            return {"success": False,
                    "error": "task method %s is not a web operation" % task_operation}
        try:
            return {"success": True,
                    "result": meth(**kw)}
        except Exception, e:
            return {"success": False,
                    "error": "task method encountered an error: " + str(e)}
#
# Handling documentation. This is used both in the live
# CherryPy provision and when generating static documentation.
#
# Toplevel document transformer.
def enhanceRootDocIndex(matPkgRoot, taskDirs = None):
    """Return the doc index.html text with task-specific enhancements.

    Reads web/htdocs/doc/html/index.html under matPkgRoot, lets every
    plugin task that defines a docEnhancementClass rewrite the page
    text, and reveals the bundle-license link when a BUNDLE_LICENSE
    file is present next to the package root. If taskDirs is given,
    only tasks rooted in those directories are applied (used when
    generating static documentation for a bundled subset of tasks).
    """
    path = os.path.join(matPkgRoot, "web", "htdocs", "doc", "html", "index.html")
    # Use a context manager so the handle is closed even if read() fails
    # (the original open/read/close leaked the handle on error).
    with open(path, "r") as fp:
        s = fp.read()
    # When I use this when I'm generating my static apps, I need to
    # look at the apps I'm including, not the apps that are installed.
    def _enhanceString(p, s, taskDirs = None):
        # Now, get the plugins, and traverse down from the root. Every time
        # you find a class with a docEnhancementClass, instantiate and process.
        if p.docEnhancementClass and ((taskDirs is None) or (p.taskRoot in taskDirs)):
            e = p.docEnhancementClass(os.path.basename(p.taskRoot), s)
            e.process()
            s = e.finish()
        for child in p.children:
            s = _enhanceString(child, s, taskDirs = taskDirs)
        return s
    if taskDirs is None:
        s = _enhanceString(MAT.PluginMgr.LoadPlugins().getRootTask(), s)
    else:
        # Create a new plugins directory. Make sure these taskdirs
        # are included, and then limit the taskdirs as you recurse.
        pDict = MAT.PluginMgr.LoadPlugins(*taskDirs)
        # Don't name the comprehension variable 'path': in Python 2 a
        # list comprehension leaks its variable and would clobber the
        # index.html path computed above.
        s = _enhanceString(pDict.getRootTask(), s,
                           taskDirs = [os.path.realpath(os.path.abspath(td)) for td in taskDirs])
    # If there's a BUNDLE_LICENSE in matPkgRoot, make a link
    # to it available.
    if os.path.exists(os.path.join(matPkgRoot, "BUNDLE_LICENSE")):
        s = s.replace("class='invisible bundle_license'", "class='bundle_license'", 1)
    return s
# And here's the static document creator, in case I need it somewhere other
# than the installer.
import shutil
def createStaticDocumentTree(matPkgRoot, taskRoots, jCarafeRoot, targetRoot):
    """Copy the MAT documentation tree to targetRoot for static use.

    Copies web/htdocs/doc, writes the task-enhanced index.html, copies
    the LICENSE (and BUNDLE_LICENSE, when present in bundled
    distributions), the jCarafe user's guide the pages refer to, and
    each task's own doc directory.
    """
    shutil.copytree(os.path.join(matPkgRoot, "web", "htdocs", "doc"), targetRoot)
    # Enhance the root index with the per-task documentation additions.
    s = enhanceRootDocIndex(matPkgRoot, taskDirs = taskRoots)
    # Context manager guarantees the file is flushed and closed even
    # if write() fails (the original open/write/close leaked on error).
    with open(os.path.join(targetRoot, "html", "index.html"), "w") as fp:
        fp.write(s)
    # Copy the license.
    shutil.copy(os.path.join(matPkgRoot, "LICENSE"),
                os.path.join(targetRoot, "html"))
    # This will be true of MAT directories in bundled distributions.
    if os.path.exists(os.path.join(matPkgRoot, "BUNDLE_LICENSE")):
        shutil.copy(os.path.join(matPkgRoot, "BUNDLE_LICENSE"),
                    os.path.join(targetRoot, "html"))
    # Copy the jCarafe documentation, since the Web pages
    # refer to it.
    os.makedirs(os.path.join(targetRoot, "html", "jcarafe_resources"))
    shutil.copy(os.path.join(jCarafeRoot, "resources", "jCarafeUsersGuide.pdf"),
                os.path.join(targetRoot, "html", "jcarafe_resources"))
    for taskRoot in taskRoots:
        if os.path.isdir(os.path.join(taskRoot, "doc")):
            shutil.copytree(os.path.join(taskRoot, "doc"),
                            os.path.join(targetRoot, "html", "tasks", os.path.basename(taskRoot), "doc"))
# ---------------------------------------------------------------------------
# End of MAT CGI module; Rook Ceph external-cluster script follows.
# ---------------------------------------------------------------------------
'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import errno
import sys
import json
import argparse
import unittest
import re
import requests
import subprocess
from os import linesep as LINESEP
from os import path
# backward compatibility with 2.x: Python < 3.6 has no
# ModuleNotFoundError, so alias it to its parent ImportError.
try:
    ModuleNotFoundError
# Catch only the NameError raised when the name is missing — a bare
# 'except:' would also swallow KeyboardInterrupt/SystemExit.
except NameError:
    ModuleNotFoundError = ImportError
try:
import rados
except ModuleNotFoundError as noModErr:
print("Error: %s\nExiting the script..." % noModErr)
sys.exit(1)
try:
import rbd
except ModuleNotFoundError as noModErr:
print("Error: %s\nExiting the script..." % noModErr)
sys.exit(1)
try:
# for 2.7.x
from StringIO import StringIO
except ModuleNotFoundError:
# for 3.x
from io import StringIO
try:
# for 2.7.x
from urlparse import urlparse
except ModuleNotFoundError:
# for 3.x
from urllib.parse import urlparse
class ExecutionFailureException(Exception):
    """Fatal error while collecting or validating external-cluster data
    (bad flags, missing pools, or a failed Ceph command)."""
    pass
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    """Drop-in stand-in for the 'rados' cluster handle.

    Serves canned JSON outputs (loaded from test-data/ceph-status-out
    plus a table of hard-coded command responses) so the script can be
    exercised without a live Ceph cluster.
    """

    def __init__(self):
        self.return_val = 0
        self.err_message = ''
        self.state = 'connected'
        self.cmd_output_map = {}
        self.cmd_names = {}
        self._init_cmd_output_map()
        # hostname -> fabricated IP, so repeated lookups are stable
        self.dummy_host_ip_map = {}

    def _init_cmd_output_map(self):
        # Map canonicalized (sort_keys) command JSON -> canned output.
        json_file_name = 'test-data/ceph-status-out'
        script_dir = path.abspath(path.dirname(__file__))
        ceph_status_str = ""
        with open(path.join(script_dir, json_file_name), 'r') as json_file:
            ceph_status_str = json_file.read()
        self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
        self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
        self.cmd_names['mgr services'] = '''{"format": "json", "prefix": "mgr services"}'''
        # all the commands and their output
        self.cmd_output_map[self.cmd_names['fs ls']] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]'''
        self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
        self.cmd_output_map[self.cmd_names['mgr services']] = '''{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"format": "json", "prefix": "mgr services"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
        # NOTE: duplicate keys below deliberately overwrite earlier
        # entries; the last assignment wins.
        self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
        self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get"}'''] = '''[]'''
        self.cmd_output_map['''{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str

    def shutdown(self):
        # Nothing to release for the dummy handle.
        pass

    def get_fsid(self):
        # Fixed fsid matching the canned quorum_status output above.
        return 'af4e1673-0b72-402d-990a-22d2919d0f1c'

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def pool_exists(self, pool_name):
        # The dummy cluster pretends every pool exists.
        return True

    def mon_command(self, cmd, out):
        # Canonicalize the command JSON (sorted keys) and look up the
        # canned output; mimics rados' (retval, outbuf, outs) tuple.
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, \
            cmd_output, \
            "{}".format(self.err_message).encode('utf-8')

    def _convert_hostname_to_ip(self, host_name):
        """Return host_name unchanged if it looks like an IPv4 address,
        otherwise a stable fabricated 172.9.x.y address for it."""
        # BUGFIX: the dots must be escaped — the previous pattern
        # r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}' accepted ANY character
        # between the digit groups, so hostnames such as '1a2b3c4'
        # were misclassified as IP addresses.
        ip_reg_x = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
        # if provided host is directly an IP address, return the same
        if ip_reg_x.match(host_name):
            return host_name
        import random
        host_ip = self.dummy_host_ip_map.get(host_name, "")
        if not host_ip:
            host_ip = "172.9.{}.{}".format(
                random.randint(0, 254), random.randint(0, 254))
            self.dummy_host_ip_map[host_name] = host_ip
        return host_ip

    @classmethod
    def Rados(cls, conffile=None):
        # BUGFIX: a classmethod always receives the class as its first
        # argument; without 'cls' the class object was silently bound
        # to 'conffile'. The conffile is ignored by the dummy.
        return DummyRados()
class RadosJSON:
    # Entity used to run health checks against the external cluster
    # (see the --run-as-user help text below).
    EXTERNAL_USER_NAME = "client.healthchecker"
    # RGW admin-ops user name for object-store administration.
    EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user"
    # Error text used when a Ceph command returns an empty result list.
    EMPTY_OUTPUT_LIST = "Empty output list"
    # Fallback applied when --rgw-pool-prefix is left empty.
    DEFAULT_RGW_POOL_PREFIX = "default"
    # Fallback applied when --monitoring-endpoint-port is left empty.
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
common_group = argP.add_argument_group('common')
common_group.add_argument("--verbose", "-v",
action='store_true', default=False)
common_group.add_argument("--ceph-conf", "-c",
help="Provide a ceph conf file.", type=str)
common_group.add_argument("--run-as-user", "-u", default="", type=str,
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'")
common_group.add_argument("--cluster-name", default="",
help="Ceph cluster name")
common_group.add_argument("--namespace", default="",
help="Namespace where CephCluster is running")
common_group.add_argument("--rgw-pool-prefix", default="",
help="RGW Pool prefix")
common_group.add_argument("--restricted-auth-permission", default=False,
help="Restricted cephCSIKeyrings auth permissions to specific pools, cluster and pool namespaces." +
"Mandatory flags that need to be set are --rbd-data-pool-name, --rados-namespace and --cluster-name." +
"--cephfs-filesystem-name flag can also be passed in case of cephfs user restriction, so it can restrict user to particular cephfs filesystem" +
"sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --rados-namespace radosNamespace --cluster-name rookStorage --restricted-auth-permission true`" +
"Note: Restricting the users per pool, per cluster and per pool namespace will require to create new users and new secrets for that users.")
output_group = argP.add_argument_group('output')
output_group.add_argument("--format", "-t", choices=["json", "bash"],
default='json', help="Provides the output format (json | bash)")
output_group.add_argument("--output", "-o", default="",
help="Output will be stored into the provided file")
output_group.add_argument("--cephfs-filesystem-name", default="",
help="Provides the name of the Ceph filesystem")
output_group.add_argument("--cephfs-metadata-pool-name", default="",
help="Provides the name of the cephfs metadata pool")
output_group.add_argument("--cephfs-data-pool-name", default="",
help="Provides the name of the cephfs data pool")
output_group.add_argument("--rbd-data-pool-name", default="", required=False,
help="Provides the name of the RBD datapool")
output_group.add_argument("--rgw-endpoint", default="", required=False,
help="Rados GateWay endpoint (in <IP>:<PORT> format)")
output_group.add_argument("--rgw-tls-cert-path", default="", required=False,
help="Rados GateWay endpoint TLS certificate")
output_group.add_argument("--rgw-skip-tls", required=False, default=False,
help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED")
output_group.add_argument("--monitoring-endpoint", default="", required=False,
help="Ceph Manager prometheus exporter endpoints (comma separated list of <IP> entries of active and standby mgrs)")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
output_group.add_argument("--rbd-metadata-ec-pool-name", default="", required=False,
help="Provides the name of erasure coded RBD metadata pool")
output_group.add_argument("--dry-run", default=False, action='store_true',
help="Dry run prints the executed commands without running them")
output_group.add_argument("--rados-namespace", default="", required=False,
help="divides a pool into separate logical namespaces")
upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
help="Upgrades the 'csi-user'(For example: client.csi-cephfs-provisioner) with new permissions needed for the new cluster version and older permission will still be applied." +
"Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)" +
"For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags" +
"mandatory flags: '--rbd-data-pool-name, --rados-namespace, --cluster-name and --run-as-user' flags while upgrading" +
"in case of cephfs users if you have passed --cephfs-filesystem-name flag while creating user then while upgrading it will be mandatory too" +
"Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --rados-namespace radosNamespace --cluster-name rookStorage --run-as-user client.csi-rbd-node-rookStorage-replicapool-radosNamespace`" +
"PS: An existing non-restricted user cannot be downgraded to a restricted user by upgrading. Admin need to create a new restricted user for this by re-running the script." +
"Upgrade flag should only be used to append new permissions to users, it shouldn't be used for changing user already applied permission, for example you shouldn't change in which pool user has access")
if args_to_parse:
assert type(args_to_parse) == list, \
"Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
    def validate_rgw_metadata_ec_pool_name(self):
        """Validate the erasure-coded RBD pool pair supplied via CLI flags.

        When '--rbd-metadata-ec-pool-name' is set, query 'osd dump' and check
        that the metadata pool exists as a replicated pool and that the data
        pool ('--rbd-data-pool-name') exists as an erasure-coded pool.

        NOTE(review): despite the 'rgw' in the method name, this validates
        RBD pools, not RGW ones.

        :return: the metadata pool name on success; None when the flag was
            not provided (implicit return).
        :raises ExecutionFailureException: on empty flags, a failed
            'osd dump' call, or a missing/mismatched pool.
        """
        if self._arg_parser.rbd_metadata_ec_pool_name:
            rbd_metadata_ec_pool_name = self._arg_parser.rbd_metadata_ec_pool_name
            rbd_pool_name = self._arg_parser.rbd_data_pool_name
            if rbd_pool_name == "":
                raise ExecutionFailureException(
                    "Flag '--rbd-data-pool-name' should not be empty"
                )
            if rbd_metadata_ec_pool_name == "":
                raise ExecutionFailureException(
                    "Flag '--rbd-metadata-ec-pool-name' should not be empty"
                )
            cmd_json = {
                "prefix": "osd dump", "format": "json"
            }
            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
            if ret_val != 0 or len(json_out) == 0:
                raise ExecutionFailureException(
                    "{}".format(cmd_json['prefix']) + " command failed.\n" +
                    "Error: {}".format(err_msg if ret_val !=
                                       0 else self.EMPTY_OUTPUT_LIST)
                )
            metadata_pool_exist, pool_exist = False, False
            for key in json_out['pools']:
                # an empty erasure_code_profile with a matching name means the
                # metadata pool exists and is a replicated pool
                if key['erasure_code_profile'] == "" and key['pool_name'] == rbd_metadata_ec_pool_name:
                    metadata_pool_exist = True
                # a non-empty erasure_code_profile with a matching name means
                # the data pool exists and is an erasure-coded pool
                if key['erasure_code_profile'] and key['pool_name'] == rbd_pool_name:
                    pool_exist = True
            if not metadata_pool_exist:
                raise ExecutionFailureException(
                    "Provided rbd_ec_metadata_pool name, {}, does not exist".format(rbd_metadata_ec_pool_name))
            if not pool_exist:
                raise ExecutionFailureException(
                    "Provided rbd_data_pool name, {}, does not exist".format(rbd_pool_name))
            return rbd_metadata_ec_pool_name
def dry_run(self, msg):
if self._arg_parser.dry_run:
print("Execute: " + "'" + msg + "'")
def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
contents = f.read()
return contents.rstrip()
def _check_conflicting_options(self):
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified")
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IPv4>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 0 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
def endpoint_dial(self, endpoint_str, timeout=3, cert=None):
# if the 'cluster' instance is a dummy one,
# don't try to reach out to the endpoint
if isinstance(self.cluster, DummyRados):
return
protocols = ["http", "https"]
for prefix in protocols:
try:
ep = "{}://{}".format(prefix, endpoint_str)
# If verify is set to a path to a directory,
# the directory must have been processed using the c_rehash utility supplied with OpenSSL.
if prefix == "https" and cert and self._arg_parser.rgw_skip_tls:
r = requests.head(ep, timeout=timeout, verify=False)
elif prefix == "https" and cert:
r = requests.head(ep, timeout=timeout, verify=cert)
else:
r = requests.head(ep, timeout=timeout)
if r.status_code == 200:
return
except:
continue
raise ExecutionFailureException(
"unable to connect to endpoint: {}".format(endpoint_str))
    def __init__(self, arg_list=None):
        """Parse CLI arguments, apply defaults and connect to the Ceph cluster.

        :param arg_list: optional argument list (used by tests); when None,
            ``sys.argv[1:]`` is parsed (see ``gen_arg_parser``).
        :raises ExecutionFailureException: when neither '--upgrade' nor
            '--rbd-data-pool-name' was supplied.
        """
        # generated key/value output, filled lazily by _gen_output_map()
        self.out_map = {}
        # keys kept in out_map but omitted from the shell output
        self._excluded_keys = set()
        self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
        self._check_conflicting_options()
        self.run_as_user = self._arg_parser.run_as_user
        self.output_file = self._arg_parser.output
        self.ceph_conf = self._arg_parser.ceph_conf
        # minimal caps granted to the checker user; '{0}' is substituted with
        # the rgw pool prefix when the caps are rendered
        self.MIN_USER_CAP_PERMISSIONS = {
            'mgr': 'allow command config',
            'mon': 'allow r, allow command quorum_status, allow command version',
            'osd': "allow rwx pool={0}.rgw.meta, " +
            "allow r pool=.rgw.root, " +
            "allow rw pool={0}.rgw.control, " +
            "allow rx pool={0}.rgw.log, " +
            "allow x pool={0}.rgw.buckets.index"
        }
        # if user not provided, give a default user
        if not self.run_as_user and not self._arg_parser.upgrade:
            self.run_as_user = self.EXTERNAL_USER_NAME
        if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
            self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
        # connect with the explicit ceph.conf if given, otherwise let librados
        # search its default config-file locations
        if self.ceph_conf:
            self.cluster = rados.Rados(conffile=self.ceph_conf)
        else:
            self.cluster = rados.Rados()
            self.cluster.conf_read_file()
        self.cluster.connect()
def shutdown(self):
if self.cluster.state == "connected":
self.cluster.shutdown()
def get_fsid(self):
if self._arg_parser.dry_run:
return self.dry_run("ceph fsid")
return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
if self._arg_parser.verbose:
print("Command Input: {}".format(cmd))
print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
ret_val, cmd_out, err_msg))
json_out = {}
# if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
# then convert 'cmd_out' to a json output
if ret_val == 0 and cmd_out:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
    def get_ceph_external_mon_data(self):
        """Return '<leader-name>=<ip:port>' for the current quorum leader mon.

        Runs 'quorum_status', locates the leader in the monmap and returns
        its v1 public address. In dry-run mode only echoes the command.

        :raises ExecutionFailureException: if the command fails, the leader
            is missing from the monmap, or only the v2 address is enabled.
        """
        cmd_json = {"prefix": "quorum_status", "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json['prefix'])
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'quorum_status' command failed.\n" +
                "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        q_leader_name = json_out['quorum_leader_name']
        q_leader_details = {}
        q_leader_matching_list = [l for l in json_out['monmap']['mons']
                                  if l['name'] == q_leader_name]
        if len(q_leader_matching_list) == 0:
            raise ExecutionFailureException("No matching 'mon' details found")
        q_leader_details = q_leader_matching_list[0]
        # get the address vector of the quorum-leader
        q_leader_addrvec = q_leader_details.get(
            'public_addrs', {}).get('addrvec', [])
        # if the quorum-leader has only one address in the address-vector
        # and it is of type 'v2' (ie; with <IP>:3300),
        # raise an exception to make user aware that
        # they have to enable 'v1' (ie; with <IP>:6789) type as well
        if len(q_leader_addrvec) == 1 and q_leader_addrvec[0]['type'] == 'v2':
            raise ExecutionFailureException(
                "Only 'v2' address type is enabled, user should also enable 'v1' type as well")
        # 'public_addr' looks like '<ip>:<port>/<nonce>'; strip the nonce
        ip_port = str(q_leader_details['public_addr'].split('/')[0])
        return "{}={}".format(str(q_leader_name), ip_port)
def _join_host_port(self, endpoint, port):
port = "{}".format(port)
# regex to check the given endpoint is enclosed in square brackets
ipv6_regx = re.compile(r'^\[[^]]*\]$')
# endpoint has ':' in it and if not (already) enclosed in square brackets
if endpoint.count(':') and not ipv6_regx.match(endpoint):
endpoint = '[{}]'.format(endpoint)
if not port:
return endpoint
return ':'.join([endpoint, port])
def _convert_hostname_to_ip(self, host_name):
# if 'cluster' instance is a dummy type,
# call the dummy instance's "convert" method
if not host_name:
raise ExecutionFailureException("Empty hostname provided")
if isinstance(self.cluster, DummyRados):
return self.cluster._convert_hostname_to_ip(host_name)
import socket
ip = socket.gethostbyname(host_name)
del socket
return ip
    def get_active_and_standby_mgrs(self):
        """Return (all_mgr_ips_csv, monitoring_port) for prometheus monitoring.

        When '--monitoring-endpoint' was not supplied, discover the active
        mgr's prometheus endpoint and the standby mgr names via 'ceph status'.
        The first address in the (comma/space separated) list is the active
        endpoint; the rest are treated as standbys. All hostnames are resolved
        to IPs, the active endpoint is validated and probed, and the active
        IP is placed first in the returned comma-separated list.

        :raises ExecutionFailureException: on command failure, a missing
            prometheus service, an empty endpoint list, a failed hostname
            resolution, or an unreachable/invalid endpoint.
        """
        if self._arg_parser.dry_run:
            return "", self.dry_run("ceph status")
        monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
        monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
        standby_mgrs = []
        if not monitoring_endpoint_ip_list:
            cmd_json = {"prefix": "status", "format": "json"}
            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
            # if there is an unsuccessful attempt,
            if ret_val != 0 or len(json_out) == 0:
                raise ExecutionFailureException(
                    "'mgr services' command failed.\n" +
                    "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
            monitoring_endpoint = json_out.get('mgrmap', {}).get(
                'services', {}).get('prometheus', '')
            if not monitoring_endpoint:
                raise ExecutionFailureException(
                    "'prometheus' service not found, is the exporter enabled?'.\n")
            # now check the stand-by mgr-s
            standby_arr = json_out.get('mgrmap', {}).get('standbys', [])
            for each_standby in standby_arr:
                if 'name' in each_standby.keys():
                    standby_mgrs.append(each_standby['name'])
            try:
                parsed_endpoint = urlparse(monitoring_endpoint)
            except ValueError:
                raise ExecutionFailureException(
                    "invalid endpoint: {}".format(monitoring_endpoint))
            monitoring_endpoint_ip_list = parsed_endpoint.hostname
            if not monitoring_endpoint_port:
                monitoring_endpoint_port = "{}".format(parsed_endpoint.port)
        # if monitoring endpoint port is not set, put a default mon port
        if not monitoring_endpoint_port:
            monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
        # user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>")
        monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(
            ",", " ")
        monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split()
        # if monitoring-endpoint could not be found, raise an error
        if len(monitoring_endpoint_ip_list_split) == 0:
            raise ExecutionFailureException("No 'monitoring-endpoint' found")
        # first ip is treated as the main monitoring-endpoint
        monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0]
        # rest of the ip-s are added to the 'standby_mgrs' list
        standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:])
        try:
            # 'failed_ip' tracks the host currently being resolved so the
            # error message can name it if resolution fails
            failed_ip = monitoring_endpoint_ip
            monitoring_endpoint_ip = self._convert_hostname_to_ip(
                monitoring_endpoint_ip)
            # collect all the 'stand-by' mgr ips
            mgr_ips = []
            for each_standby_mgr in standby_mgrs:
                failed_ip = each_standby_mgr
                mgr_ips.append(
                    self._convert_hostname_to_ip(each_standby_mgr))
        except:
            raise ExecutionFailureException(
                "Conversion of host: {} to IP failed. "
                "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag".format(failed_ip))
        monitoring_endpoint = self._join_host_port(
            monitoring_endpoint_ip, monitoring_endpoint_port)
        self._invalid_endpoint(monitoring_endpoint)
        self.endpoint_dial(monitoring_endpoint)
        # add the validated active mgr IP into the first index
        mgr_ips.insert(0, monitoring_endpoint_ip)
        all_mgr_ips_str = ",".join(mgr_ips)
        return all_mgr_ips_str, monitoring_endpoint_port
def check_user_exist(self, user):
cmd_json = {"prefix": "auth get", "entity": "{}".format(
user), "format": "json"}
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
if ret_val != 0 or len(json_out) == 0:
return ""
return str(json_out[0]['key'])
def get_cephfs_provisioner_caps_and_entity(self):
entity = "client.csi-cephfs-provisioner"
caps = {"mon": "allow r, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "allow rw tag cephfs metadata=*"}
if self._arg_parser.restricted_auth_permission:
cluster_name = self._arg_parser.cluster_name
if cluster_name == "":
raise ExecutionFailureException(
"cluster_name not found, please set the '--cluster-name' flag")
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = "{}-{}".format(entity, cluster_name)
else:
entity = "{}-{}-{}".format(entity,
cluster_name, cephfs_filesystem)
caps["osd"] = "allow rw tag cephfs metadata={}".format(
cephfs_filesystem)
return caps, entity
def get_cephfs_node_caps_and_entity(self):
entity = "client.csi-cephfs-node"
caps = {"mon": "allow r, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "allow rw tag cephfs *=*",
"mds": "allow rw"}
if self._arg_parser.restricted_auth_permission:
cluster_name = self._arg_parser.cluster_name
if cluster_name == "":
raise ExecutionFailureException(
"cluster_name not found, please set the '--cluster-name' flag")
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = "{}-{}".format(entity, cluster_name)
else:
entity = "{}-{}-{}".format(entity,
cluster_name, cephfs_filesystem)
caps["osd"] = "allow rw tag cephfs data={}".format(
cephfs_filesystem)
return caps, entity
def get_rbd_provisioner_caps_and_entity(self):
entity = "client.csi-rbd-provisioner"
caps = {"mon": "profile rbd, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "profile rbd"}
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
rados_namespace = self._arg_parser.rados_namespace
if rbd_pool_name == "" or cluster_name == "" or rados_namespace == "":
raise ExecutionFailureException(
"mandatory flags not found, please set the '--rbd-data-pool-name', '--cluster-name' and --rados-namespace flags")
entity = "{}-{}-{}-{}".format(entity, cluster_name,
rbd_pool_name, rados_namespace)
caps["osd"] = "profile rbd pool={}".format(rbd_pool_name)
return caps, entity
def get_rbd_node_caps_and_entity(self):
entity = "client.csi-rbd-node"
caps = {"mon": "profile rbd, allow command 'osd blocklist'",
"osd": "profile rbd"}
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
rados_namespace = self._arg_parser.rados_namespace
if rbd_pool_name == "" or cluster_name == "" or rados_namespace == "":
raise ExecutionFailureException(
"mandatory flags not found, please set the '--rbd-data-pool-name', '--cluster-name' and --rados-namespace flags")
entity = "{}-{}-{}-{}".format(entity, cluster_name,
rbd_pool_name, rados_namespace)
caps["osd"] = "profile rbd pool={}".format(rbd_pool_name)
return caps, entity
def get_caps_and_entity(self, user_name):
if "client.csi-cephfs-provisioner" in user_name:
if "client.csi-cephfs-provisioner" != user_name:
self._arg_parser.restricted_auth_permission = True
return self.get_cephfs_provisioner_caps_and_entity()
elif "client.csi-cephfs-node" in user_name:
if "client.csi-cephfs-node" != user_name:
self._arg_parser.restricted_auth_permission = True
return self.get_cephfs_node_caps_and_entity()
elif "client.csi-rbd-provisioner" in user_name:
if "client.csi-rbd-provisioner" != user_name:
self._arg_parser.restricted_auth_permission = True
return self.get_rbd_provisioner_caps_and_entity()
elif "client.csi-rbd-node" in user_name:
if "client.csi-rbd-node" != user_name:
self._arg_parser.restricted_auth_permission = True
return self.get_rbd_node_caps_and_entity()
raise ExecutionFailureException(
"no user found with user_name: {} ,".format(user_name)
+ "get_caps_and_entity command failed.\n")
    def create_cephCSIKeyring_user(self, user):
        '''
        Get-or-create a CSI keyring user and return its secret key.

        Example of the underlying command:
        ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'

        :param user: one of the 'client.csi-*' names (possibly suffixed for
            restricted permissions); resolved via get_caps_and_entity().
        :return: the user's secret key, or the dry-run echo.
        :raises ExecutionFailureException: when the ceph call fails.
        '''
        caps, entity = self.get_caps_and_entity(user)
        # flatten {'mon': 'allow r', ...} into ['mon', 'allow r', ...] as the
        # mon command expects alternating cap-name/cap-value entries
        cmd_json = {"prefix": "auth get-or-create",
                    "entity": entity,
                    "caps": [cap for cap_list in list(caps.items()) for cap in cap_list],
                    "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
        # check if user already exist
        user_key = self.check_user_exist(entity)
        if user_key != "":
            return user_key
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'auth get-or-create {}' command failed.\n".format(user) +
                "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        return str(json_out[0]['key'])
    def get_cephfs_data_pool_details(self):
        """Resolve and validate the CephFS filesystem/metadata/data pool names.

        Runs 'fs ls' and fills self._arg_parser.cephfs_filesystem_name,
        cephfs_metadata_pool_name and cephfs_data_pool_name, either from the
        user-supplied flags (validating them against the fs list) or by
        picking sensible defaults (the single filesystem, or the one owning
        the given data pool). Silently returns when nothing was requested and
        no unambiguous default exists.

        :raises ExecutionFailureException: when a user-supplied filesystem or
            data-pool name cannot be matched against the cluster.
        """
        cmd_json = {"prefix": "fs ls", "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json['prefix'])
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt, report an error
        if ret_val != 0:
            # if fs and data_pool arguments are not set, silently return
            if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "":
                return
            # if user has provided any of the
            # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
            # raise an exception as we are unable to verify the args
            raise ExecutionFailureException(
                "'fs ls' ceph call failed with error: {}".format(err_msg))
        matching_json_out = {}
        # if '--cephfs-filesystem-name' argument is provided,
        # check whether the provided filesystem-name exists or not
        if self._arg_parser.cephfs_filesystem_name:
            # get the matching list
            matching_json_out_list = [matched for matched in json_out
                                      if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
            # unable to find a matching fs-name, raise an error
            if len(matching_json_out_list) == 0:
                raise ExecutionFailureException(
                    ("Filesystem provided, '{}', " +
                     "is not found in the fs-list: '{}'").format(
                        self._arg_parser.cephfs_filesystem_name,
                        [str(x['name']) for x in json_out]))
            matching_json_out = matching_json_out_list[0]
        # if cephfs filesystem name is not provided,
        # try to get a default fs name by doing the following
        else:
            # a. check if there is only one filesystem is present
            if len(json_out) == 1:
                matching_json_out = json_out[0]
            # b. or else, check if data_pool name is provided
            elif self._arg_parser.cephfs_data_pool_name:
                # and if present, check whether there exists a fs which has the data_pool
                for eachJ in json_out:
                    if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']:
                        matching_json_out = eachJ
                        break
                # if there is no matching fs exists, that means provided data_pool name is invalid
                if not matching_json_out:
                    raise ExecutionFailureException(
                        "Provided data_pool name, {}, does not exists".format(
                            self._arg_parser.cephfs_data_pool_name))
            # c. if nothing is set and couldn't find a default,
            else:
                # just return silently
                return
        if matching_json_out:
            self._arg_parser.cephfs_filesystem_name = str(
                matching_json_out['name'])
            self._arg_parser.cephfs_metadata_pool_name = str(
                matching_json_out['metadata_pool'])
        if type(matching_json_out['data_pools']) == list:
            # if the user has already provided data-pool-name,
            # through --cephfs-data-pool-name
            if self._arg_parser.cephfs_data_pool_name:
                # if the provided name is not matching with the one in the list
                if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
                    raise ExecutionFailureException(
                        "{}: '{}', {}: {}".format(
                            "Provided data-pool-name",
                            self._arg_parser.cephfs_data_pool_name,
                            "doesn't match from the data-pools' list",
                            [str(x) for x in matching_json_out['data_pools']]))
            # if data_pool name is not provided,
            # then try to find a default data pool name
            else:
                # if no data_pools exist, silently return
                if len(matching_json_out['data_pools']) == 0:
                    return
                self._arg_parser.cephfs_data_pool_name = str(
                    matching_json_out['data_pools'][0])
            # if there are more than one 'data_pools' exist,
            # then warn the user that we are using the selected name
            if len(matching_json_out['data_pools']) > 1:
                print("{}: {}\n{}: '{}'\n".format(
                    "WARNING: Multiple data pools detected",
                    [str(x) for x in matching_json_out['data_pools']],
                    "Using the data-pool",
                    self._arg_parser.cephfs_data_pool_name))
    def create_checkerKey(self):
        """Get-or-create the external checker user with minimal caps and return its key.

        The osd cap template in MIN_USER_CAP_PERMISSIONS is rendered with the
        rgw pool prefix. In dry-run mode only echoes the command.

        :raises ExecutionFailureException: when the ceph call fails.
        """
        cmd_json = {"prefix": "auth get-or-create",
                    "entity": self.run_as_user,
                    "caps": ["mon", self.MIN_USER_CAP_PERMISSIONS['mon'],
                             "mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
                             "osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
                    "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'auth get-or-create {}' command failed\n".format(self.run_as_user) +
                "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        return str(json_out[0]['key'])
def get_ceph_dashboard_link(self):
cmd_json = {"prefix": "mgr services", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'])
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
return None
if not 'dashboard' in json_out:
return None
return json_out['dashboard']
def create_rgw_admin_ops_user(self):
cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name',
'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
if self._arg_parser.dry_run:
return self.dry_run("ceph " + "".joing(cmd))
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as execErr:
# if the user already exists, we just query it
if execErr.returncode == errno.EEXIST:
cmd = ['radosgw-admin', 'user', 'info',
'--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME
]
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as execErr:
err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % (
cmd, execErr.output, execErr.returncode, execErr.stderr)
raise Exception(err_msg)
else:
err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % (
cmd, execErr.output, execErr.returncode, execErr.stderr)
raise Exception(err_msg)
jsonoutput = json.loads(output)
return jsonoutput["keys"][0]['access_key'], jsonoutput["keys"][0]['secret_key']
def validate_pool(self):
pools_to_validate = [self._arg_parser.rbd_data_pool_name]
# if rgw_endpoint is provided, validate it
if self._arg_parser.rgw_endpoint:
self._invalid_endpoint(self._arg_parser.rgw_endpoint)
self.endpoint_dial(self._arg_parser.rgw_endpoint,
cert=self.validate_rgw_endpoint_tls_cert())
rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix),
".rgw.root",
"{0}.rgw.control".format(
self._arg_parser.rgw_pool_prefix),
"{0}.rgw.log".format(
self._arg_parser.rgw_pool_prefix)]
pools_to_validate.extend(rgw_pool_to_validate)
for pool in pools_to_validate:
if not self.cluster.pool_exists(pool):
raise ExecutionFailureException(
"The provided pool, '{}', does not exist".format(pool))
def validate_rados_namespace(self):
rbd_pool_name = self._arg_parser.rbd_data_pool_name
rados_namespace = self._arg_parser.rados_namespace
if rados_namespace == "":
return
rbd_inst = rbd.RBD()
ioctx = self.cluster.open_ioctx(rbd_pool_name)
if rbd_inst.namespace_exists(ioctx, rados_namespace) == False:
raise ExecutionFailureException(
("The provided rados Namespace, '{}', is not found in the pool '{}'").format(
rados_namespace, rbd_pool_name))
    def _gen_output_map(self):
        """Populate self.out_map with every key the external cluster needs.

        Idempotent: returns immediately when the map was already generated.
        Pools and namespace are validated first; each subsequent key is
        produced by the corresponding ceph query. 'ACCESS_KEY'/'SECRET_KEY'
        are only present when an RGW endpoint was supplied.
        """
        if self.out_map:
            return
        self.validate_pool()
        self.validate_rados_namespace()
        # CLUSTER_NAME is consumed internally; keep it out of the shell output
        self._excluded_keys.add('CLUSTER_NAME')
        self.get_cephfs_data_pool_details()
        self.out_map['NAMESPACE'] = self._arg_parser.namespace
        self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
        self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
        self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
        self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
        self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
        self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] = self.get_ceph_dashboard_link()
        self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_user(
            "client.csi-rbd-node")
        self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_user(
            "client.csi-rbd-provisioner")
        self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
        self.out_map['CEPHFS_METADATA_POOL_NAME'] = self._arg_parser.cephfs_metadata_pool_name
        self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
        self.out_map['RESTRICTED_AUTH_PERMISSION'] = self._arg_parser.restricted_auth_permission
        self.out_map['RADOS_NAMESPACE'] = self._arg_parser.rados_namespace
        self.out_map['CSI_CEPHFS_NODE_SECRET'] = ''
        self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
        # create CephFS node and provisioner keyring only when MDS exists
        if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
            self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_user(
                "client.csi-cephfs-node")
            self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_user(
                "client.csi-cephfs-provisioner")
        self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
        self.out_map['RGW_TLS_CERT'] = ''
        self.out_map['MONITORING_ENDPOINT'], \
            self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_and_standby_mgrs()
        self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
        # NOTE(review): despite its name, this validates the RBD EC pool pair
        self.out_map['RBD_METADATA_EC_POOL_NAME'] = self.validate_rgw_metadata_ec_pool_name()
        self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
        if self._arg_parser.rgw_endpoint:
            self.out_map['ACCESS_KEY'], self.out_map['SECRET_KEY'] = self.create_rgw_admin_ops_user()
            if self._arg_parser.rgw_tls_cert_path:
                self.out_map['RGW_TLS_CERT'] = self.validate_rgw_endpoint_tls_cert()
def gen_shell_out(self):
self._gen_output_map()
shOutIO = StringIO()
for k, v in self.out_map.items():
if v and k not in self._excluded_keys:
shOutIO.write('export {}={}{}'.format(k, v, LINESEP))
shOut = shOutIO.getvalue()
shOutIO.close()
return shOut
    def gen_json_out(self):
        """Render the collected map as the JSON blob consumed by the importer.

        Builds a list of {name, kind, data} resource records (ConfigMaps,
        Secrets, StorageClasses, CephCluster fields). In restricted-auth mode
        the CSI secret names carry cluster/pool/namespace (or filesystem)
        suffixes; otherwise the generic names are emitted. Returns '' in
        dry-run mode.
        """
        self._gen_output_map()
        json_out = [
            {
                "name": "rook-ceph-mon-endpoints",
                "kind": "ConfigMap",
                "data": {
                    "data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
                    "maxMonId": "0",
                    "mapping": "{}"
                }
            },
            {
                "name": "rook-ceph-mon",
                "kind": "Secret",
                "data": {
                    "admin-secret": "admin-secret",
                    "fsid": self.out_map['ROOK_EXTERNAL_FSID'],
                    "mon-secret": "mon-secret"
                },
            },
            {
                "name": "rook-ceph-operator-creds",
                "kind": "Secret",
                "data": {
                    "userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
                    "userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
                }
            },
            {
                "name": "monitoring-endpoint",
                "kind": "CephCluster",
                "data": {
                    "MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'],
                    "MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT']
                }
            }
        ]
        # an EC-backed RBD StorageClass names both the (EC) data pool and the
        # replicated metadata pool; a plain one only the pool
        if self.out_map['RBD_METADATA_EC_POOL_NAME']:
            json_out.append({
                "name": "ceph-rbd",
                "kind": "StorageClass",
                "data": {
                    "dataPool": self.out_map['RBD_POOL_NAME'],
                    "pool": self.out_map['RBD_METADATA_EC_POOL_NAME']
                },
            })
        else:
            json_out.append({
                "name": "ceph-rbd",
                "kind": "StorageClass",
                "data": {
                    "pool": self.out_map['RBD_POOL_NAME']
                },
            })
        # restricted users get per-cluster/pool/namespace suffixed secrets
        if self._arg_parser.restricted_auth_permission:
            cluster_name = self._arg_parser.cluster_name
            rbd_pool_name = self._arg_parser.rbd_data_pool_name
            cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
            rados_namespace = self._arg_parser.rados_namespace
            json_out.append({
                "name": "rook-csi-rbd-node-{}-{}-{}".format(cluster_name, rbd_pool_name, rados_namespace),
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-node-{}-{}-{}'.format(cluster_name, rbd_pool_name, rados_namespace),
                    "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
                }
            })
            # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
            if self.out_map['CSI_RBD_PROVISIONER_SECRET']:
                json_out.append({
                    "name": "rook-csi-rbd-provisioner-{}-{}-{}".format(cluster_name, rbd_pool_name, rados_namespace),
                    "kind": "Secret",
                    "data": {
                        "userID": 'csi-rbd-provisioner-{}-{}-{}'.format(cluster_name, rbd_pool_name, rados_namespace),
                        "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
                    },
                })
            # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
            if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] and cephfs_filesystem != "":
                json_out.append({
                    "name": "rook-csi-cephfs-provisioner-{}-{}".format(cluster_name, cephfs_filesystem),
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-provisioner-{}-{}'.format(cluster_name, cephfs_filesystem),
                        "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
                    },
                })
            if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] and cephfs_filesystem == "":
                json_out.append({
                    "name": "rook-csi-cephfs-provisioner-{}".format(cluster_name),
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-provisioner-{}'.format(cluster_name),
                        "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
                    },
                })
            # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
            if self.out_map['CSI_CEPHFS_NODE_SECRET'] and cephfs_filesystem != "":
                json_out.append({
                    "name": "rook-csi-cephfs-node-{}-{}".format(cluster_name, cephfs_filesystem),
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-node-{}-{}'.format(cluster_name, cephfs_filesystem),
                        "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
                    }
                })
            if self.out_map['CSI_CEPHFS_NODE_SECRET'] and cephfs_filesystem == "":
                json_out.append({
                    "name": "rook-csi-cephfs-node-{}".format(cluster_name),
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-node-{}'.format(cluster_name),
                        "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
                    }
                })
        else:
            json_out.append({
                "name": "rook-csi-rbd-node",
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-node',
                    "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
                }
            })
            # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
            if self.out_map['CSI_RBD_PROVISIONER_SECRET']:
                json_out.append({
                    "name": "rook-csi-rbd-provisioner",
                    "kind": "Secret",
                    "data": {
                        "userID": 'csi-rbd-provisioner',
                        "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
                    },
                })
            # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
            if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']:
                json_out.append({
                    "name": "rook-csi-cephfs-provisioner",
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-provisioner',
                        "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
                    },
                })
            # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
            if self.out_map['CSI_CEPHFS_NODE_SECRET']:
                json_out.append({
                    "name": "rook-csi-cephfs-node",
                    "kind": "Secret",
                    "data": {
                        "adminID": 'csi-cephfs-node',
                        "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
                    }
                })
        # if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret
        if self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']:
            json_out.append({
                "name": "rook-ceph-dashboard-link",
                "kind": "Secret",
                "data": {
                    "userID": 'ceph-dashboard-link',
                    "userKey": self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']
                }
            })
        # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
        if self.out_map['CEPHFS_FS_NAME']:
            json_out.append({
                "name": "cephfs",
                "kind": "StorageClass",
                "data": {
                    "fsName": self.out_map['CEPHFS_FS_NAME'],
                    "pool": self.out_map['CEPHFS_POOL_NAME']
                }
            })
        # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
        if self.out_map['RGW_ENDPOINT']:
            json_out.append({
                "name": "ceph-rgw",
                "kind": "StorageClass",
                "data": {
                    "endpoint": self.out_map['RGW_ENDPOINT'],
                    "poolPrefix": self.out_map['RGW_POOL_PREFIX']
                }
            })
            json_out.append(
                {
                    "name": "rgw-admin-ops-user",
                    "kind": "Secret",
                    "data": {
                        "accessKey": self.out_map['ACCESS_KEY'],
                        "secretKey": self.out_map['SECRET_KEY']
                    }
                })
        # if 'RGW_TLS_CERT' exists, then only add the "ceph-rgw-tls-cert" secret
        if self.out_map['RGW_TLS_CERT']:
            json_out.append({
                "name": "ceph-rgw-tls-cert",
                "kind": "Secret",
                "data": {
                    "cert": self.out_map['RGW_TLS_CERT'],
                }
            })
        if self._arg_parser.dry_run:
            return ""
        return json.dumps(json_out)+LINESEP
def upgrade_users_permissions(self):
users = ["client.csi-cephfs-node", "client.csi-cephfs-provisioner",
"client.csi-rbd-node", "client.csi-rbd-provisioner"]
if self.run_as_user != "" and self.run_as_user not in users:
users.append(self.run_as_user)
for user in users:
self.upgrade_user_permissions(user)
def upgrade_user_permissions(self, user):
    """Merge the latest capability set for ``user`` into its existing caps.

    Fetches the user's current caps via ``auth get``, appends any new
    capabilities (order-preserving, duplicates removed) and writes the
    merged set back with ``auth caps``.  A missing user is skipped with
    a message; a failing ``auth caps`` call raises
    ExecutionFailureException.
    """
    # check whether the given user exists or not
    cmd_json = {"prefix": "auth get", "entity": "{}".format(
        user), "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        print("user {} not found for upgrading.".format(user))
        return
    existing_caps = json_out[0]['caps']
    new_cap, _ = self.get_caps_and_entity(user)
    cap_keys = ["mon", "mgr", "osd", "mds"]
    caps = []
    for each_cap in cap_keys:
        cur_cap_values = existing_caps.get(each_cap, '')
        new_cap_values = new_cap.get(each_cap, '')
        cur_cap_perm_list = [x.strip()
                             for x in cur_cap_values.split(',') if x.strip()]
        new_cap_perm_list = [x.strip()
                             for x in new_cap_values.split(',') if x.strip()]
        # append the new perms after the current ones so the existing
        # order of caps is preserved, then drop duplicates while keeping
        # that order (dict.fromkeys preserves insertion order on 3.7+);
        # replaces the original side-effecting list comprehension
        merged = list(dict.fromkeys(cur_cap_perm_list + new_cap_perm_list))
        existing_caps[each_cap] = ", ".join(merged)
        if existing_caps[each_cap]:
            # 'auth caps' wants a flat [key, value, key, value, ...] list
            caps.append(each_cap)
            caps.append(existing_caps[each_cap])
    cmd_json = {"prefix": "auth caps",
                "entity": user,
                "caps": caps,
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0:
        raise ExecutionFailureException("'auth caps {}' command failed.\n".format(user) +
                                        "Error: {}".format(err_msg))
    print("Updated user, {}, successfully.".format(user))
def main(self):
    """Entry point: upgrade user caps, or emit json/bash output.

    Prints the generated output and, when ``self.output_file`` is set
    and output is non-empty, also writes it there.  Raises
    ExecutionFailureException for an unsupported --format value.
    """
    generated_output = ''
    if self._arg_parser.upgrade:
        self.upgrade_users_permissions()
    elif self._arg_parser.format == 'json':
        generated_output = self.gen_json_out()
    elif self._arg_parser.format == 'bash':
        generated_output = self.gen_shell_out()
    else:
        raise ExecutionFailureException("Unsupported format: {}".format(
            self._arg_parser.format))
    print('{}'.format(generated_output))
    if self.output_file and generated_output:
        # context manager guarantees the file is closed even if write fails
        # (original opened/closed manually and could leak the handle)
        with open(self.output_file, 'w') as fOut:
            fOut.write(generated_output)
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        print("Execution Failed: {}".format(err))
        raise err
    except KeyError as kErr:
        # str.format (not a logging-style "%s" pair passed to print),
        # so the failing key actually appears inside the message
        print("KeyError: {}".format(kErr))
    except OSError as osErr:
        print("Error while trying to output the data: {}".format(osErr))
    finally:
        # always release resources held by the RadosJSON connection
        rjObj.shutdown()
|
|
import hashlib
import mimetypes
import os
import posixpath
import re
from time import time
from urlparse import urlsplit, urlunsplit
from werkzeug.exceptions import NotFound
from werkzeug.http import is_resource_modified, http_date
from spa.static.handlers import StaticHandler
from spa.utils import clean_path
class HashCache(object):
    """In-memory cache mapping static file paths to content hashes, and
    (for rewritten CSS) to their rewritten contents."""

    def __init__(self):
        self.path_hashes = {}
        self.contents = {}

    def get_path_hash(self, path):
        """Return the cached content hash for *path*, or None."""
        return self.path_hashes.get(path, None)

    def set_path_hash(self, path, path_hash):
        """Remember *path_hash* as the content hash for *path*."""
        self.path_hashes[path] = path_hash

    def get_contents(self, path):
        """Return the cached (rewritten) contents for *path*, or None."""
        return self.contents.get(path, None)

    def set_contents(self, path, contents):
        """Remember *contents* as the rewritten body for *path*."""
        self.contents[path] = contents
class CacheBustingStaticHandler(StaticHandler):
    """Static file handler that serves hash-stamped filenames.

    Incoming paths must embed a content hash (e.g. 'app.deadbeef1234.css').
    The handler strips the hash, verifies it against the file's actual
    (cached) content hash, and returns 404 on mismatch.  CSS responses are
    additionally rewritten so url()/@import references use hashed names.
    """

    # (compiled pattern, replacement template) pairs used to rewrite CSS
    # url(...) and @import "..." references to hashed filenames.
    css_url_patterns = (
        (re.compile(r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", re.IGNORECASE),
         """url("{hashed_url}")"""),
        (re.compile(r"""(@import\s*["']\s*(.*?)["'])""", re.IGNORECASE),
         """@import url("{hashed_url}")"""),
    )

    def __init__(self, app, req, params, directory, hash_cache, **kwargs):
        # hash_cache is shared between handler instances (see SmartStatic),
        # so computed hashes and rewritten CSS persist across requests.
        self.hash_cache = hash_cache
        return super(CacheBustingStaticHandler, self).__init__(
            app, req, params, directory, **kwargs
        )

    def get(self, filepath):
        """Serve *filepath* after validating its embedded content hash."""
        unhashed_path, path_hash = parse_hashed_filepath(filepath)
        if unhashed_path is None:
            # no hash in the name; this handler only serves hashed URLs
            return NotFound()
        if self.hash_cache.get_path_hash(unhashed_path) is None:
            # compute hash, and cache it.
            file = self.get_file(unhashed_path)
            if file is None:
                return NotFound()
            try:
                hash_str = get_hash(file.handle)
                self.hash_cache.set_path_hash(unhashed_path, hash_str)
            finally:
                file.handle.close()
        # If hash we were passed doesn't equal the one we've computed and
        # cached, then 404.
        if path_hash != self.hash_cache.get_path_hash(unhashed_path):
            return NotFound()
        # For CSS stylesheets only, we'll rewrite content so that url()
        # functions will point to hashed filenames instead of unhashed. The
        # rewritten CSS content will be kept in memory.
        if mimetypes.guess_type(filepath)[0] == 'text/css':
            return self.make_css_response(unhashed_path)
        return super(CacheBustingStaticHandler, self).get(unhashed_path)

    def make_css_response(self, filepath):
        """Return a WSGI app serving *filepath* with rewritten, cached CSS."""
        def resp(environ, start_response):
            file = self.get_file(filepath)
            try:
                headers = [('Date', http_date())]
                if self.cache:
                    timeout = self.cache_timeout
                    etag = self.generate_etag(file.mtime, file.size, file.name)
                    headers += [
                        ('Etag', '"%s"' % etag),
                        ('Cache-Control', 'max-age=%d, public' % timeout)
                    ]
                    # conditional GET: 304 when the client's copy is fresh
                    if not is_resource_modified(environ, etag, last_modified=file.mtime):
                        start_response('304 Not Modified', headers)
                        return []
                    headers.append(('Expires', http_date(time() + timeout)))
                else:
                    headers.append(('Cache-Control', 'public'))
                # rewrite url()/@import references once, then serve the
                # cached rewritten body on subsequent requests
                contents = self.hash_cache.get_contents(filepath)
                if contents is None:
                    contents = file.handle.read()
                    for pat, tpl in self.css_url_patterns:
                        converter = self.get_converter(tpl)
                        contents = pat.sub(converter, contents)
                    self.hash_cache.set_contents(filepath, contents)
                headers.extend((
                    ('Content-Type', file.mimetype),
                    ('Content-Length', len(contents)),
                    ('Last-Modified', http_date(file.mtime))
                ))
                start_response('200 OK', headers)
                return [contents]
            finally:
                file.handle.close()
        return resp

    def get_converter(self, tpl):
        """Return a re.sub callback rewriting one CSS URL using *tpl*."""
        def converter(matchobj):
            matched, url = matchobj.groups()
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                # external / anchor / data URL: do not hash.
                # NOTE(review): this returns the bare URL, so re.sub replaces
                # the whole matched url(...)/@import expression with just the
                # URL text — looks like it should return `matched`; confirm.
                return url
            return tpl.format(hashed_url=self.convert_css_url(url))
        return converter

    def convert_css_url(self, css_url):
        """Rewrite one CSS-referenced URL to its hash-stamped equivalent."""
        split_url = urlsplit(css_url)
        url_path = split_url.path
        if not url_path.startswith('/'):
            # relative to the stylesheet currently being served
            abs_url_path = self.make_path_absolute(url_path)
        else:
            abs_url_path = posixpath.realpath(url_path)
        prefix = self.get_url_prefix()
        # now make the path as it would be passed in to this handler when
        # requested from the web. From there we can use existing methods on the
        # class to resolve to a real file.
        _, _, content_filepath = abs_url_path.partition(prefix)
        content_filepath = clean_path(content_filepath)
        content_file_hash = self.hash_cache.get_path_hash(content_filepath)
        if content_file_hash is None:
            content_file = self.get_file(content_filepath)
            if content_file is None:
                # surfaced in the emitted CSS to make the broken ref visible
                return 'NOT FOUND: "%s"' % url_path
            try:
                content_file_hash = get_hash(content_file.handle)
            finally:
                content_file.handle.close()
        parts = list(split_url)
        parts[2] = add_hash_to_filepath(url_path, content_file_hash)
        url = urlunsplit(parts)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in css_url:
            parts = list(urlsplit(url))
            if not parts[3]:
                parts[2] += '?'
            url = urlunsplit(parts)
        return url

    def get_url_prefix(self):
        """
        Return the mount point for this handler. So if you had a route like
        this:
            ('/foo/bar/static/<path:filepath>', 'foo', Handler)
        Then this function should return '/foo/bar/static/'
        """
        env = self.request.environ
        filepath = self.params['filepath']
        prefix, _, _ = (env['SCRIPT_NAME'] +
                        env['PATH_INFO']).rpartition(filepath)
        return prefix

    def make_path_absolute(self, path):
        """
        Given a relative url found inside the CSS file we're currently serving,
        return an absolute form of that URL.
        """
        env = self.request.environ
        pinfo = posixpath.dirname(env['PATH_INFO'])
        return posixpath.realpath(env['SCRIPT_NAME'] + pinfo + '/' + path)
def parse_hashed_filepath(filename, hash_len=12):
    """
    Given a name like '/static/my_file.deadbeef1234.txt', return a tuple of the file name
    without the hash, and the hash itself, like this:
        ('/static/my_file.txt', 'deadbeef1234')
    If no hash part is found, then return (None, None).
    """
    # Raw string so '\.' is a real regex escape.  The hash is hexadecimal,
    # so the class is [0-9a-f]; the original's stray comma inside the class
    # also accepted "hashes" containing commas.
    pat = r'^(?P<before>.*)\.(?P<hash>[0-9a-f]{%s})(?P<after>.*?)$' % hash_len
    m = re.match(pat, filename)
    if m is None:
        return None, None
    parts = m.groupdict()
    return '{before}{after}'.format(**parts), parts['hash']
def add_hash_to_filepath(filepath, hash_str):
    """Insert *hash_str* before the extension: '/a/b.css' -> '/a/b.<hash>.css'.

    Uses posixpath because these are URL paths, which always use forward
    slashes; os.path would join with backslashes on Windows.
    """
    path, filename = posixpath.split(filepath)
    root, ext = posixpath.splitext(filename)
    return posixpath.join(path, "%s.%s%s" % (root, hash_str, ext))
def get_hash(lines, hash_len=12):
    """Return the first *hash_len* hex chars of the MD5 of *lines*.

    *lines* is any iterable of byte chunks (e.g. an open file handle).
    """
    digest = hashlib.md5()
    for chunk in lines:
        digest.update(chunk)
    return digest.hexdigest()[:hash_len]
class SmartStatic(object):
    """
    A factory for making CacheBustingStaticHandler instances that share a cache
    instance.
    """

    def __init__(self, directory):
        self.directory = directory
        self.hash_cache = HashCache()

    def __call__(self, app, req, params, **kwargs):
        """Build a handler wired to this factory's directory and cache."""
        handler = CacheBustingStaticHandler(
            app,
            req,
            params,
            directory=self.directory,
            hash_cache=self.hash_cache,
            **kwargs
        )
        return handler
|
|
#! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy.lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
    import matplotlib
    # and set the backend to be Agg (no gui)
    matplotlib.use('Agg')
    # before importing pyplot
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed, while still tolerating any import or backend failure
    have_matplotlib = False
class TestLinkage(object):
    """Tests for scipy.cluster.hierarchy.linkage."""

    def test_linkage_empty_distance_matrix(self):
        # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
        assert_raises(ValueError, linkage, np.zeros((0,)))

    def test_linkage_tdist(self):
        # both str and unicode method names must be accepted
        for name in ['single', 'complete', 'average', 'weighted', u('single')]:
            yield self.check_linkage_tdist, name

    def check_linkage_tdist(self, method):
        # Tests linkage(Y, method) on the tdist data set.
        observed = linkage(hierarchy_test_data.ytdist, method)
        expected = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
        assert_allclose(observed, expected, atol=1e-10)

    def test_linkage_X(self):
        for name in ('centroid', 'median', 'ward'):
            yield self.check_linkage_q, name

    def check_linkage_q(self, method):
        # Tests linkage(Y, method) on the Q data set.
        observed = linkage(hierarchy_test_data.X, method)
        expected = getattr(hierarchy_test_data, 'linkage_X_' + method)
        assert_allclose(observed, expected, atol=1e-06)
class TestInconsistent(object):
    """Tests for scipy.cluster.hierarchy.inconsistent."""

    def test_inconsistent_tdist(self):
        for d in hierarchy_test_data.inconsistent_ytdist:
            yield self.check_inconsistent_tdist, d

    def check_inconsistent_tdist(self, depth):
        # inconsistent(Z, depth) on the single-linkage tdist clustering must
        # match the precomputed fixture for that depth.
        Z = hierarchy_test_data.linkage_ytdist_single
        expected = hierarchy_test_data.inconsistent_ytdist[depth]
        assert_allclose(inconsistent(Z, depth), expected)
class TestCopheneticDistance(object):
    """Tests for scipy.cluster.hierarchy.cophenet."""

    def test_linkage_cophenet_tdist_Z(self):
        # Tests cophenet(Z) on tdist data set.
        # Expected condensed cophenetic distance matrix for the
        # single-linkage clustering of tdist.
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        Z = hierarchy_test_data.linkage_ytdist_single
        M = cophenet(Z)
        assert_allclose(M, expectedM, atol=1e-10)

    def test_linkage_cophenet_tdist_Z_Y(self):
        # Tests cophenet(Z, Y) on tdist data set.
        # With Y supplied, cophenet also returns the cophenetic
        # correlation coefficient c.
        Z = hierarchy_test_data.linkage_ytdist_single
        (c, M) = cophenet(Z, hierarchy_test_data.ytdist)
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        expectedc = 0.639931296433393415057366837573
        assert_allclose(c, expectedc, atol=1e-10)
        assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
    """Tests for from_mlab_linkage / to_mlab_linkage round-tripping."""

    def test_mlab_linkage_conversion_empty(self):
        # Tests from/to_mlab_linkage on empty linkage array.
        X = np.asarray([])
        assert_equal(from_mlab_linkage([]), X)
        assert_equal(to_mlab_linkage([]), X)

    def test_mlab_linkage_conversion_single_row(self):
        # Tests from/to_mlab_linkage on linkage array with single row.
        # MATLAB rows are 1-indexed [idx1, idx2, dist]; the scipy form is
        # 0-indexed with an extra cluster-size column.
        Z = np.asarray([[0., 1., 3., 2.]])
        Zm = [[1, 2, 3]]
        assert_equal(from_mlab_linkage(Zm), Z)
        assert_equal(to_mlab_linkage(Z), Zm)

    def test_mlab_linkage_conversion_multiple_rows(self):
        # Tests from/to_mlab_linkage on linkage array with multiple rows.
        Zm = np.asarray([[3, 6, 138], [4, 5, 219],
                         [1, 8, 255], [2, 9, 268], [7, 10, 295]])
        Z = np.array([[2., 5., 138., 2.],
                      [3., 4., 219., 2.],
                      [0., 7., 255., 3.],
                      [1., 8., 268., 4.],
                      [6., 9., 295., 6.]],
                     dtype=np.double)
        assert_equal(from_mlab_linkage(Zm), Z)
        assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
    """Tests fclusterdata/fcluster against precomputed flat clusterings."""

    def test_fclusterdata(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            yield self.check_fclusterdata, t, 'inconsistent'
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fclusterdata, t, 'distance'
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fclusterdata, t, 'maxclust'

    def check_fclusterdata(self, t, criterion):
        # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        X = hierarchy_test_data.Q_X
        T = fclusterdata(X, criterion=criterion, t=t)
        # cluster labels are arbitrary, so compare up to relabelling
        assert_(is_isomorphic(T, expectedT))

    def test_fcluster(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            yield self.check_fcluster, t, 'inconsistent'
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fcluster, t, 'distance'
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fcluster, t, 'maxclust'

    def check_fcluster(self, t, criterion):
        # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expectedT))

    def test_fcluster_monocrit(self):
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fcluster_monocrit, t
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fcluster_maxclust_monocrit, t

    def check_fcluster_monocrit(self, t):
        # 'monocrit' with monocrit=maxdists(Z) must reproduce the plain
        # 'distance' clustering.
        expectedT = hierarchy_test_data.fcluster_distance[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))

    def check_fcluster_maxclust_monocrit(self, t):
        # 'maxclust_monocrit' with monocrit=maxdists(Z) must reproduce the
        # plain 'maxclust' clustering.
        expectedT = hierarchy_test_data.fcluster_maxclust[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
    """Tests for scipy.cluster.hierarchy.leaders."""

    def test_leaders_single(self):
        # Tests leaders using a flat clustering generated by single linkage.
        data = hierarchy_test_data.Q_X
        Z = linkage(pdist(data))
        flat = fcluster(Z, criterion='maxclust', t=3)
        expected = (np.array([53, 55, 56]), np.array([2, 3, 1]))
        observed = leaders(Z, flat)
        assert_equal(observed, expected)
class TestIsIsomorphic(object):
    """Tests is_isomorphic: flat clusterings equal up to label renaming."""

    def test_is_isomorphic_1(self):
        # Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
        a = [1, 1, 1]
        b = [2, 2, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))

    def test_is_isomorphic_2(self):
        # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
        a = [1, 7, 1]
        b = [2, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))

    def test_is_isomorphic_3(self):
        # Tests is_isomorphic on test case #3 (no flat clusters)
        a = []
        b = []
        assert_(is_isomorphic(a, b))

    def test_is_isomorphic_4A(self):
        # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
        a = [1, 2, 3]
        b = [1, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))

    def test_is_isomorphic_4B(self):
        # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
        a = [1, 2, 3, 3]
        b = [1, 3, 2, 3]
        assert_(is_isomorphic(a, b) == False)
        assert_(is_isomorphic(b, a) == False)

    def test_is_isomorphic_4C(self):
        # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
        a = [7, 2, 3]
        b = [6, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))

    def test_is_isomorphic_5(self):
        # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling).
        for nc in [2, 3, 5]:
            yield self.help_is_isomorphic_randperm, 1000, nc

    def test_is_isomorphic_6(self):
        # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling, slightly
        # nonisomorphic.)
        for nc in [2, 3, 5]:
            yield self.help_is_isomorphic_randperm, 1000, nc, True, 5

    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
        # Build a random clustering of nobs observations into nclusters,
        # relabel it with a random permutation of the labels, and check
        # isomorphism in both directions.  When noniso is set, corrupt
        # nerrors entries (shift label by 1 mod nclusters) so the pair is
        # no longer isomorphic.  Repeated 3 times.
        for k in range(3):
            a = np.int_(np.random.rand(nobs) * nclusters)
            b = np.zeros(a.size, dtype=np.int_)
            P = np.random.permutation(nclusters)
            for i in xrange(0, a.shape[0]):
                b[i] = P[a[i]]
            if noniso:
                Q = np.random.permutation(nobs)
                b[Q[0:nerrors]] += 1
                b[Q[0:nerrors]] %= nclusters
            assert_(is_isomorphic(a, b) == (not noniso))
            assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
    """Tests is_valid_linkage validation of linkage matrices."""

    def test_is_valid_linkage_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_linkage_various_size, nrow, ncol, valid

    def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrics of various sizes
        Z = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        Z = Z[:nrow, :ncol]
        assert_(is_valid_linkage(Z) == valid)
        if not valid:
            # with throw=True the validator must raise instead of returning
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_int_type(self):
        # Tests is_valid_linkage(Z) with integer type.
        # NOTE(review): np.int is removed in modern NumPy; retained as-is
        # for the NumPy version this suite targets.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.int)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(TypeError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_empty(self):
        # Tests is_valid_linkage(Z) with empty linkage.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(is_valid_linkage(Z) == True)

    def test_is_valid_linkage_4_and_up_neg_index_left(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (left).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            # corrupt the left cluster index of a middle merge row
            Z[i//2,0] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_index_right(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (right).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            # corrupt the right cluster index of a middle merge row
            Z[i//2,1] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_dist(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative distances.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            # corrupt the merge distance of a middle merge row
            Z[i//2,2] = -0.5
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_counts(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative counts.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            # corrupt the cluster-size column of a middle merge row
            Z[i//2,3] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
    """Tests is_valid_im validation of inconsistency matrices.

    NOTE(review): the *_neg_index_left/right and *_neg_dist method names
    appear copied from the linkage tests; the inline comments, which
    describe the perturbed im column (mean / std / count), look like the
    accurate description — confirm against the im column layout.
    """

    def test_is_valid_im_int_type(self):
        # Tests is_valid_im(R) with integer type.
        # NOTE(review): np.int is removed in modern NumPy; retained as-is
        # for the NumPy version this suite targets.
        R = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.int)
        assert_(is_valid_im(R) == False)
        assert_raises(TypeError, is_valid_im, R, throw=True)

    def test_is_valid_im_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_im_various_size, nrow, ncol, valid

    def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with linkage matrics of various sizes
        R = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        R = R[:nrow, :ncol]
        assert_(is_valid_im(R) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_empty(self):
        # Tests is_valid_im(R) with empty inconsistency matrix.
        R = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_im(R) == False)
        assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            assert_(is_valid_im(R) == True)

    def test_is_valid_im_4_and_up_neg_index_left(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height means.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            # column 0 of the im holds mean link heights
            R[i//2,0] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up_neg_index_right(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height standard deviations.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            # column 1 of the im holds link height standard deviations
            R[i//2,1] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up_neg_dist(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link counts.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            # column 2 of the im holds link counts
            R[i//2,2] = -0.5
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
    """Tests leaves_list and its consistency with to_tree pre-order."""

    def test_leaves_list_1x4(self):
        # Tests leaves_list(Z) on a 1x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1])

    def test_leaves_list_2x4(self):
        # Tests leaves_list(Z) on a 2x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1, 2])

    def test_leaves_list_Q(self):
        for method in ['single', 'complete', 'average', 'weighted', 'centroid',
                       'median', 'ward']:
            yield self.check_leaves_list_Q, method

    def check_leaves_list_Q(self, method):
        # Tests leaves_list(Z) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        node = to_tree(Z)
        # leaves_list must agree with a pre-order walk of the cluster tree
        assert_equal(node.pre_order(), leaves_list(Z))

    def test_Q_subtree_pre_order(self):
        # Tests that pre_order() works when called on sub-trees.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        node = to_tree(Z)
        # root pre-order == left-subtree pre-order + right-subtree pre-order
        assert_equal(node.pre_order(), (node.get_left().pre_order()
                                        + node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondance should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondance should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
    """Tests is_monotonic: merge distances never decrease up the tree."""

    def test_is_monotonic_empty(self):
        # Tests is_monotonic(Z) on an empty linkage.
        Z = np.zeros((0, 4))
        assert_raises(ValueError, is_monotonic, Z)

    def test_is_monotonic_1x4(self):
        # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)

    def test_is_monotonic_2x4_T(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)

    def test_is_monotonic_2x4_F(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
        # second merge distance (0.3) is below the first (0.4)
        Z = np.asarray([[0, 1, 0.4, 2],
                        [2, 3, 0.3, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)

    def test_is_monotonic_3x4_T(self):
        # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)

    def test_is_monotonic_3x4_F1(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.2, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)

    def test_is_monotonic_3x4_F2(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
        Z = np.asarray([[0, 1, 0.8, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)

    def test_is_monotonic_3x4_F3(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.2, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)

    def test_is_monotonic_tdist_linkage1(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Expecting True.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_equal(is_monotonic(Z), True)

    def test_is_monotonic_tdist_linkage2(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Perturbing. Expecting False.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        # force one merge distance to zero to break monotonicity
        Z[2,2] = 0.0
        assert_equal(is_monotonic(Z), False)

    def test_is_monotonic_Q_linkage(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # Q data set. Expecting True.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
    """Tests maxdists against a brute-force reference implementation."""

    def test_maxdists_empty_linkage(self):
        # Tests maxdists(Z) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxdists, Z)

    def test_maxdists_one_cluster_linkage(self):
        # Tests maxdists(Z) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        MD = maxdists(Z)
        # calculate_maximum_distances is the brute-force reference helper
        # defined elsewhere in this test module.
        expectedMD = calculate_maximum_distances(Z)
        assert_allclose(MD, expectedMD, atol=1e-15)

    def test_maxdists_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            yield self.check_maxdists_Q_linkage, method

    def check_maxdists_Q_linkage(self, method):
        # Tests maxdists(Z) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        MD = maxdists(Z)
        expectedMD = calculate_maximum_distances(Z)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
    """Tests maxinconsts against a brute-force reference implementation."""

    def test_maxinconsts_empty_linkage(self):
        # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxinconsts, Z, R)

    def test_maxinconsts_difrow_linkage(self):
        # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
        # different numbers of clusters. Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxinconsts, Z, R)

    def test_maxinconsts_one_cluster_linkage(self):
        # Tests maxinconsts(Z, R) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxinconsts(Z, R)
        # calculate_maximum_inconsistencies is the brute-force reference
        # helper defined elsewhere in this test module.
        expectedMD = calculate_maximum_inconsistencies(Z, R)
        assert_allclose(MD, expectedMD, atol=1e-15)

    def test_maxinconsts_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            yield self.check_maxinconsts_Q_linkage, method

    def check_maxinconsts_Q_linkage(self, method):
        # Tests maxinconsts(Z, R) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        MD = maxinconsts(Z, R)
        expectedMD = calculate_maximum_inconsistencies(Z, R)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
    """Tests maxRstat over each inconsistency-matrix column index i."""

    def test_maxRstat_invalid_index(self):
        for i in [3.3, -1, 4]:
            yield self.check_maxRstat_invalid_index, i

    def check_maxRstat_invalid_index(self, i):
        # Tests maxRstat(Z, R, i). Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        if isinstance(i, int):
            # integer but out-of-range column index
            assert_raises(ValueError, maxRstat, Z, R, i)
        else:
            # non-integer index
            assert_raises(TypeError, maxRstat, Z, R, i)

    def test_maxRstat_empty_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_empty_linkage, i

    def check_maxRstat_empty_linkage(self, i):
        # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_difrow_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_difrow_linkage, i

    def check_maxRstat_difrow_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
        # different numbers of clusters. Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_one_cluster_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_one_cluster_linkage, i

    def check_maxRstat_one_cluster_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        # use the yielded column index i (the original hard-coded 1,
        # leaving columns 0, 2 and 3 untested)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)

    def test_maxRstat_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            for i in range(4):
                yield self.check_maxRstat_Q_linkage, method, i

    def check_maxRstat_Q_linkage(self, method, i):
        # Tests maxRstat(Z, R, i) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        # as above: exercise the yielded column index, not a hard-coded 1
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
    """Tests for scipy.cluster.hierarchy.dendrogram: coordinate computation,
    plotting (matplotlib-gated), truncation modes, and link coloring."""

    def test_dendrogram_single_linkage_tdist(self):
        # Tests dendrogram calculation on single linkage of the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, no_plot=True)
        leaves = R["leaves"]
        assert_equal(leaves, [2, 5, 1, 0, 3, 4])

    def test_valid_orientation(self):
        # An unrecognized orientation string must raise ValueError.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_raises(ValueError, dendrogram, Z, orientation="foo")

    @dec.skipif(not have_matplotlib)
    def test_dendrogram_plot(self):
        # Generator test: plotting must work for every orientation.
        for orientation in ['top', 'bottom', 'left', 'right']:
            yield self.check_dendrogram_plot, orientation

    def check_dendrogram_plot(self, orientation):
        # Tests dendrogram plotting.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        # Expected link colors and coordinates for the single-linkage
        # tdist tree (independent of orientation).
        expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4]}

        fig = plt.figure()
        ax = fig.add_subplot(111)

        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        plt.close()
        assert_equal(R1, expected)

        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        assert_equal(R2, expected)

    @dec.skipif(not have_matplotlib)
    def test_dendrogram_truncate_mode(self):
        Z = linkage(hierarchy_test_data.ytdist, 'single')

        # 'lastp' keeps only the last p merges; contracted leaves are
        # labelled with their member counts, e.g. '(2)'.
        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['b'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9]})

        # 'mtica' truncation mode keeps the top levels of the tree.
        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7]})

    def test_dendrogram_colors(self):
        # Tests dendrogram plots with alternate colors
        Z = linkage(hierarchy_test_data.ytdist, 'single')

        set_link_color_palette(['c', 'm', 'y', 'k'])
        R = dendrogram(Z, no_plot=True,
                       above_threshold_color='g', color_threshold=250)
        # Restore the default palette so later tests are unaffected.
        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])

        color_list = R['color_list']
        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
    """Return, for each non-singleton cluster in `Z`, the maximum merge
    distance found anywhere in that cluster's subtree.

    Pure-Python reference used to verify scipy.cluster.hierarchy.maxdists.

    Parameters
    ----------
    Z : ndarray, shape (n-1, 4)
        Linkage matrix; columns 0/1 are child indices, column 2 the merge
        distance.

    Returns
    -------
    B : ndarray, shape (n-1,)
        B[i] = max(Z[i, 2], subtree maxima of both children of cluster n+i).
    """
    n = Z.shape[0] + 1
    B = np.zeros((n-1,))
    q = np.zeros((3,))
    # Linkage rows are ordered so children precede parents, so one forward
    # pass can propagate subtree maxima.
    # BUG FIX: `range` instead of py2-only `xrange` (NameError on Python 3).
    for i in range(0, n - 1):
        q[:] = 0.0
        left = Z[i, 0]
        right = Z[i, 1]
        # Indices >= n denote previously formed clusters; singletons (< n)
        # contribute 0.0 from the reset above.
        if left >= n:
            q[0] = B[int(left) - n]
        if right >= n:
            q[1] = B[int(right) - n]
        q[2] = Z[i, 2]
        B[i] = q.max()
    return B
def calculate_maximum_inconsistencies(Z, R, k=3):
    """Return, for each non-singleton cluster in `Z`, the maximum value of
    column `k` of the inconsistency matrix `R` over that cluster's subtree.

    Pure-Python reference used to verify scipy.cluster.hierarchy.maxinconsts
    and maxRstat.

    Parameters
    ----------
    Z : ndarray, shape (n-1, 4)
        Linkage matrix.
    R : ndarray, shape (n-1, 4)
        Inconsistency matrix aligned row-for-row with `Z`.
    k : int, optional
        Column of `R` to maximize over (default 3, the inconsistency
        coefficient).

    Returns
    -------
    B : ndarray, shape (n-1,)
        B[i] = max(R[i, k], subtree maxima of both children of cluster n+i).
    """
    n = Z.shape[0] + 1
    B = np.zeros((n-1,))
    q = np.zeros((3,))
    # BUG FIX: `range` instead of py2-only `xrange` (NameError on Python 3).
    for i in range(0, n - 1):
        q[:] = 0.0
        left = Z[i, 0]
        right = Z[i, 1]
        # Indices >= n denote previously formed clusters whose maxima are
        # already in B; singletons contribute 0.0.
        if left >= n:
            q[0] = B[int(left) - n]
        if right >= n:
            q[1] = B[int(right) - n]
        q[2] = R[i, k]
        B[i] = q.max()
    return B
def test_euclidean_linkage_value_error():
    # Euclidean-only linkage methods must reject a non-euclidean metric.
    # NOTE(review): iterates the private mapping
    # scipy.cluster.hierarchy._cpy_euclid_methods — confirm this attribute
    # still exists in the scipy version under test.
    for method in scipy.cluster.hierarchy._cpy_euclid_methods:
        assert_raises(ValueError,
                      linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
    # A single pairwise distance and the equivalent 2x2 observation matrix
    # must produce identical single-linkage output.
    from_distance = linkage([1], method='single', metric='euclidean')
    from_observations = linkage([[0, 1], [0, 0]], method='single',
                                metric='euclidean')
    assert_allclose(from_distance, from_observations)
if __name__ == "__main__":
run_module_suite()
|
|
import random
from collections import namedtuple
import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2 import StrictUndefined
from jinja2 import TemplateRuntimeError
from jinja2 import UndefinedError
from jinja2.exceptions import TemplateAssertionError
class Magic:
    """Test helper wrapping a single value; sort/unique/min/max filter tests
    read the ``value`` attribute and ``str()`` renders it."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Render exactly like the wrapped value.
        return str(self.value)
class Magic2:
    """Test helper wrapping two values; multi-attribute sort tests read
    ``value1``/``value2`` and ``str()`` renders them as ``(v1,v2)``."""

    def __init__(self, value1, value2):
        self.value1 = value1
        self.value2 = value2

    def __str__(self):
        # Parenthesized pair, no spaces — the sort tests match this exactly.
        return f"({self.value1},{self.value2})"
class TestFilter:
def test_filter_calling(self, env):
rv = env.call_filter("sum", [1, 2, 3])
assert rv == 6
def test_capitalize(self, env):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == "Foo bar"
def test_center(self, env):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == " foo "
def test_default(self, env):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given="yes") == "no|False|no|yes"
@pytest.mark.parametrize(
"args,expect",
(
("", "[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]"),
("true", "[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]"),
('by="value"', "[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]"),
("reverse=true", "[('c', 2), ('b', 1), ('AB', 3), ('aa', 0)]"),
),
)
def test_dictsort(self, env, args, expect):
t = env.from_string(f"{{{{ foo|dictsort({args}) }}}}")
out = t.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == expect
def test_batch(self, env):
tmpl = env.from_string("{{ foo|batch(3)|list }}|{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]"
)
def test_slice(self, env):
tmpl = env.from_string("{{ foo|slice(3)|list }}|{{ foo|slice(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]"
)
def test_escape(self, env):
tmpl = env.from_string("""{{ '<">&'|escape }}""")
out = tmpl.render()
assert out == "<">&"
@pytest.mark.parametrize(
("chars", "expect"), [(None, "..stays.."), (".", " ..stays"), (" .", "stays")]
)
def test_trim(self, env, chars, expect):
tmpl = env.from_string("{{ foo|trim(chars) }}")
out = tmpl.render(foo=" ..stays..", chars=chars)
assert out == expect
def test_striptags(self, env):
tmpl = env.from_string("""{{ foo|striptags }}""")
out = tmpl.render(
foo=' <p>just a small \n <a href="#">'
"example</a> link</p>\n<p>to a webpage</p> "
"<!-- <p>and some commented stuff</p> -->"
)
assert out == "just a small example link to a webpage"
def test_filesizeformat(self, env):
tmpl = env.from_string(
"{{ 100|filesizeformat }}|"
"{{ 1000|filesizeformat }}|"
"{{ 1000000|filesizeformat }}|"
"{{ 1000000000|filesizeformat }}|"
"{{ 1000000000000|filesizeformat }}|"
"{{ 100|filesizeformat(true) }}|"
"{{ 1000|filesizeformat(true) }}|"
"{{ 1000000|filesizeformat(true) }}|"
"{{ 1000000000|filesizeformat(true) }}|"
"{{ 1000000000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|"
"1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB"
)
def test_filesizeformat_issue59(self, env):
tmpl = env.from_string(
"{{ 300|filesizeformat }}|"
"{{ 3000|filesizeformat }}|"
"{{ 3000000|filesizeformat }}|"
"{{ 3000000000|filesizeformat }}|"
"{{ 3000000000000|filesizeformat }}|"
"{{ 300|filesizeformat(true) }}|"
"{{ 3000|filesizeformat(true) }}|"
"{{ 3000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|2.9 KiB|2.9 MiB"
)
def test_first(self, env):
tmpl = env.from_string("{{ foo|first }}")
out = tmpl.render(foo=list(range(10)))
assert out == "0"
@pytest.mark.parametrize(
("value", "expect"), (("42", "42.0"), ("abc", "0.0"), ("32.32", "32.32"))
)
def test_float(self, env, value, expect):
t = env.from_string("{{ value|float }}")
assert t.render(value=value) == expect
def test_float_default(self, env):
t = env.from_string("{{ value|float(default=1.0) }}")
assert t.render(value="abc") == "1.0"
def test_format(self, env):
tmpl = env.from_string("{{ '%s|%s'|format('a', 'b') }}")
out = tmpl.render()
assert out == "a|b"
@staticmethod
def _test_indent_multiline_template(env, markup=False):
text = "\n".join(["", "foo bar", '"baz"', ""])
if markup:
text = Markup(text)
t = env.from_string("{{ foo|indent(2, false, false) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, false, true) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n '
t = env.from_string("{{ foo|indent(2, true, false) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, true, true) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n '
def test_indent(self, env):
self._test_indent_multiline_template(env)
t = env.from_string('{{ "jinja"|indent }}')
assert t.render() == "jinja"
t = env.from_string('{{ "jinja"|indent(first=true) }}')
assert t.render() == " jinja"
t = env.from_string('{{ "jinja"|indent(blank=true) }}')
assert t.render() == "jinja"
def test_indent_markup_input(self, env):
"""
Tests cases where the filter input is a Markup type
"""
self._test_indent_multiline_template(env, markup=True)
def test_indent_width_string(self, env):
t = env.from_string("{{ 'jinja\nflask'|indent(width='>>> ', first=True) }}")
assert t.render() == ">>> jinja\n>>> flask"
@pytest.mark.parametrize(
("value", "expect"),
(
("42", "42"),
("abc", "0"),
("32.32", "32"),
("12345678901234567890", "12345678901234567890"),
),
)
def test_int(self, env, value, expect):
t = env.from_string("{{ value|int }}")
assert t.render(value=value) == expect
@pytest.mark.parametrize(
("value", "base", "expect"),
(("0x4d32", 16, "19762"), ("011", 8, "9"), ("0x33Z", 16, "0")),
)
def test_int_base(self, env, value, base, expect):
t = env.from_string("{{ value|int(base=base) }}")
assert t.render(value=value, base=base) == expect
def test_int_default(self, env):
t = env.from_string("{{ value|int(default=1) }}")
assert t.render(value="abc") == "1"
def test_int_special_method(self, env):
class IntIsh:
def __int__(self):
return 42
t = env.from_string("{{ value|int }}")
assert t.render(value=IntIsh()) == "42"
def test_join(self, env):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == "1|2|3"
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == "<foo><span>foo</span>"
def test_join_attribute(self, env):
User = namedtuple("User", "username")
tmpl = env.from_string("""{{ users|join(', ', 'username') }}""")
assert tmpl.render(users=map(User, ["foo", "bar"])) == "foo, bar"
def test_last(self, env):
tmpl = env.from_string("""{{ foo|last }}""")
out = tmpl.render(foo=list(range(10)))
assert out == "9"
def test_length(self, env):
tmpl = env.from_string("""{{ "hello world"|length }}""")
out = tmpl.render()
assert out == "11"
def test_lower(self, env):
tmpl = env.from_string("""{{ "FOO"|lower }}""")
out = tmpl.render()
assert out == "foo"
def test_pprint(self, env):
from pprint import pformat
tmpl = env.from_string("""{{ data|pprint }}""")
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self, env, request):
# restore the random state when the test ends
state = random.getstate()
request.addfinalizer(lambda: random.setstate(state))
# generate the random values from a known seed
random.seed("jinja")
expected = [random.choice("1234567890") for _ in range(10)]
# check that the random sequence is generated again by a template
# ensures that filter result is not constant folded
random.seed("jinja")
t = env.from_string('{{ "1234567890"|random }}')
for value in expected:
assert t.render() == value
def test_reverse(self, env):
tmpl = env.from_string(
"{{ 'foobar'|reverse|join }}|{{ [1, 2, 3]|reverse|list }}"
)
assert tmpl.render() == "raboof|[3, 2, 1]"
def test_string(self, env):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string("""{{ obj|string }}""")
assert tmpl.render(obj=x) == str(x)
def test_title(self, env):
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "foo's bar"|title }}""")
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "f bar f"|title }}""")
assert tmpl.render() == "F Bar F"
tmpl = env.from_string("""{{ "foo-bar"|title }}""")
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string("""{{ "foo\tbar"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "FOO\tBAR"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "foo (bar)"|title }}""")
assert tmpl.render() == "Foo (Bar)"
tmpl = env.from_string("""{{ "foo {bar}"|title }}""")
assert tmpl.render() == "Foo {Bar}"
tmpl = env.from_string("""{{ "foo [bar]"|title }}""")
assert tmpl.render() == "Foo [Bar]"
tmpl = env.from_string("""{{ "foo <bar>"|title }}""")
assert tmpl.render() == "Foo <Bar>"
class Foo:
def __str__(self):
return "foo-bar"
tmpl = env.from_string("""{{ data|title }}""")
out = tmpl.render(data=Foo())
assert out == "Foo-Bar"
def test_truncate(self, env):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
"{{ smalldata|truncate(15) }}"
)
out = tmpl.render(data="foobar baz bar" * 1000, smalldata="foobar baz bar")
assert out == "foobar baz b>>>|foobar baz>>>|foobar baz bar"
def test_truncate_very_short(self, env):
tmpl = env.from_string(
'{{ "foo bar baz"|truncate(9) }}|{{ "foo bar baz"|truncate(9, true) }}'
)
out = tmpl.render()
assert out == "foo bar baz|foo bar baz"
def test_truncate_end_length(self, env):
tmpl = env.from_string('{{ "Joel is a slug"|truncate(7, true) }}')
out = tmpl.render()
assert out == "Joel..."
def test_upper(self, env):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == "FOO"
def test_urlize(self, env):
tmpl = env.from_string('{{ "foo example.org bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="https://example.org" rel="noopener">' "example.org</a> bar"
)
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/" rel="noopener">'
"http://www.example.com/</a> bar"
)
tmpl = env.from_string('{{ "foo mailto:email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
tmpl = env.from_string('{{ "foo email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
def test_urlize_rel_policy(self):
env = Environment()
env.policies["urlize.rel"] = None
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/">http://www.example.com/</a> bar'
)
def test_urlize_target_parameter(self, env):
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize(target="_blank") }}'
)
assert (
tmpl.render()
== 'foo <a href="http://www.example.com/" rel="noopener" target="_blank">'
"http://www.example.com/</a> bar"
)
def test_urlize_extra_schemes_parameter(self, env):
tmpl = env.from_string(
'{{ "foo tel:+1-514-555-1234 ftp://localhost bar"|'
'urlize(extra_schemes=["tel:", "ftp:"]) }}'
)
assert tmpl.render() == (
'foo <a href="tel:+1-514-555-1234" rel="noopener">'
'tel:+1-514-555-1234</a> <a href="ftp://localhost" rel="noopener">'
"ftp://localhost</a> bar"
)
def test_wordcount(self, env):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == "3"
strict_env = Environment(undefined=StrictUndefined)
t = strict_env.from_string("{{ s|wordcount }}")
with pytest.raises(UndefinedError):
t.render()
def test_block(self, env):
tmpl = env.from_string("{% filter lower|escape %}<HEHE>{% endfilter %}")
assert tmpl.render() == "<hehe>"
def test_chaining(self, env):
tmpl = env.from_string("""{{ ['<foo>', '<bar>']|first|upper|escape }}""")
assert tmpl.render() == "<FOO>"
def test_sum(self, env):
tmpl = env.from_string("""{{ [1, 2, 3, 4, 5, 6]|sum }}""")
assert tmpl.render() == "21"
def test_sum_attributes(self, env):
tmpl = env.from_string("""{{ values|sum('value') }}""")
assert tmpl.render(values=[{"value": 23}, {"value": 1}, {"value": 18}]) == "42"
def test_sum_attributes_nested(self, env):
tmpl = env.from_string("""{{ values|sum('real.value') }}""")
assert (
tmpl.render(
values=[
{"real": {"value": 23}},
{"real": {"value": 1}},
{"real": {"value": 18}},
]
)
== "42"
)
def test_sum_attributes_tuple(self, env):
tmpl = env.from_string("""{{ values.items()|sum('1') }}""")
assert tmpl.render(values={"foo": 23, "bar": 1, "baz": 18}) == "42"
def test_abs(self, env):
tmpl = env.from_string("""{{ -1|abs }}|{{ 1|abs }}""")
assert tmpl.render() == "1|1", tmpl.render()
def test_round_positive(self, env):
tmpl = env.from_string(
"{{ 2.7|round }}|{{ 2.1|round }}|"
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}"
)
assert tmpl.render() == "3.0|2.0|2.123|3.0", tmpl.render()
def test_round_negative(self, env):
tmpl = env.from_string(
"{{ 21.3|round(-1)}}|"
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}"
)
assert tmpl.render() == "20.0|30.0|20.0", tmpl.render()
def test_xmlattr(self, env):
tmpl = env.from_string(
"{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}"
)
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
assert 'blub:blub="<?>"' in out
def test_sort1(self, env):
tmpl = env.from_string("{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}")
assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
def test_sort2(self, env):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == "AbcD"
def test_sort3(self, env):
tmpl = env.from_string("""{{ ['foo', 'Bar', 'blah']|sort }}""")
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value')|join }}""")
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == "1234"
def test_sort5(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value.0')|join }}""")
assert tmpl.render(items=map(Magic, [[3], [2], [4], [1]])) == "[1][2][3][4]"
def test_sort6(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value1,value2')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(2,2)(2,5)(3,1)"
)
def test_sort7(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value2,value1')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(3,1)(2,2)(2,5)"
)
def test_sort8(self, env):
tmpl = env.from_string(
"""{{ items|sort(attribute='value1.0,value2.0')|join }}"""
)
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]),
[([3], [1]), ([2], [2]), ([2], [1]), ([2], [5])],
)
)
== "([2],[1])([2],[2])([2],[5])([3],[1])"
)
def test_unique(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique) }}')
assert t.render() == "bA"
def test_unique_case_sensitive(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique(true)) }}')
assert t.render() == "bAa"
def test_unique_attribute(self, env):
t = env.from_string("{{ items|unique(attribute='value')|join }}")
assert t.render(items=map(Magic, [3, 2, 4, 1, 2])) == "3241"
@pytest.mark.parametrize(
"source,expect",
(
('{{ ["a", "B"]|min }}', "a"),
('{{ ["a", "B"]|min(case_sensitive=true) }}', "B"),
("{{ []|min }}", ""),
('{{ ["a", "B"]|max }}', "B"),
('{{ ["a", "B"]|max(case_sensitive=true) }}', "a"),
("{{ []|max }}", ""),
),
)
def test_min_max(self, env, source, expect):
t = env.from_string(source)
assert t.render() == expect
@pytest.mark.parametrize(("name", "expect"), [("min", "1"), ("max", "9")])
def test_min_max_attribute(self, env, name, expect):
t = env.from_string("{{ items|" + name + '(attribute="value") }}')
assert t.render(items=map(Magic, [5, 1, 9])) == expect
def test_groupby(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render().split("|") == ["1: 1, 2: 1, 1", "2: 2, 3", "3: 3, 4", ""]
def test_groupby_tuple_index(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render() == "a:1:2|b:1|"
def test_groupby_multidot(self, env):
Date = namedtuple("Date", "day,month,year")
Article = namedtuple("Article", "title,date")
articles = [
Article("aha", Date(1, 1, 1970)),
Article("interesting", Date(2, 1, 1970)),
Article("really?", Date(3, 1, 1970)),
Article("totally not", Date(1, 1, 1971)),
]
tmpl = env.from_string(
"""
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(articles=articles).split("|") == [
"1970[aha][interesting][really?]",
"1971[totally not]",
"",
]
def test_groupby_default(self, env):
tmpl = env.from_string(
"{% for city, items in users|groupby('city', default='NY') %}"
"{{ city }}: {{ items|map(attribute='name')|join(', ') }}\n"
"{% endfor %}"
)
out = tmpl.render(
users=[
{"name": "emma", "city": "NY"},
{"name": "smith", "city": "WA"},
{"name": "john"},
]
)
assert out == "NY: emma, john\nWA: smith\n"
def test_filtertag(self, env):
tmpl = env.from_string(
"{% filter upper|replace('FOO', 'foo') %}foobar{% endfilter %}"
)
assert tmpl.render() == "fooBAR"
def test_replace(self, env):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
tmpl = env.from_string('{{ string|replace("<", 42) }}')
assert tmpl.render(string="<foo>") == "42foo>"
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
assert tmpl.render(string=Markup("foo")) == "f>x<>x<"
def test_forceescape(self, env):
tmpl = env.from_string("{{ x|forceescape }}")
assert tmpl.render(x=Markup("<div />")) == "<div />"
def test_safe(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == "<div>foo</div>"
tmpl = env.from_string('{{ "<div>foo</div>" }}')
assert tmpl.render() == "<div>foo</div>"
@pytest.mark.parametrize(
("value", "expect"),
[
("Hello, world!", "Hello%2C%20world%21"),
("Hello, world\u203d", "Hello%2C%20world%E2%80%BD"),
({"f": 1}, "f=1"),
([("f", 1), ("z", 2)], "f=1&z=2"),
({"\u203d": 1}, "%E2%80%BD=1"),
({0: 1}, "0=1"),
([("a b/c", "a b/c")], "a+b%2Fc=a+b%2Fc"),
("a b/c", "a%20b/c"),
],
)
def test_urlencode(self, value, expect):
e = Environment(autoescape=True)
t = e.from_string("{{ value|urlencode }}")
assert t.render(value=value) == expect
def test_simple_map(self, env):
env = Environment()
tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
assert tmpl.render() == "6"
def test_map_sum(self, env):
tmpl = env.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
assert tmpl.render() == "[3, 3, 15]"
def test_attribute_map(self, env):
User = namedtuple("User", "name")
env = Environment()
users = [
User("john"),
User("jane"),
User("mike"),
]
tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
assert tmpl.render(users=users) == "john|jane|mike"
def test_empty_map(self, env):
env = Environment()
tmpl = env.from_string('{{ none|map("upper")|list }}')
assert tmpl.render() == "[]"
def test_map_default(self, env):
Fullname = namedtuple("Fullname", "firstname,lastname")
Firstname = namedtuple("Firstname", "firstname")
env = Environment()
tmpl = env.from_string(
'{{ users|map(attribute="lastname", default="smith")|join(", ") }}'
)
test_list = env.from_string(
'{{ users|map(attribute="lastname", default=["smith","x"])|join(", ") }}'
)
test_str = env.from_string(
'{{ users|map(attribute="lastname", default="")|join(", ") }}'
)
users = [
Fullname("john", "lennon"),
Fullname("jane", "edwards"),
Fullname("jon", None),
Firstname("mike"),
]
assert tmpl.render(users=users) == "lennon, edwards, None, smith"
assert test_list.render(users=users) == "lennon, edwards, None, ['smith', 'x']"
assert test_str.render(users=users) == "lennon, edwards, None, "
def test_simple_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
assert tmpl.render() == "1|3|5"
def test_bool_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
assert tmpl.render() == "1|2|3|4|5"
def test_simple_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
assert tmpl.render() == "2|4"
def test_bool_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
assert tmpl.render() == "None|False|0"
def test_simple_select_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|selectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|jane"
def test_simple_reject_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|rejectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "mike"
def test_func_select_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|selectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|mike"
def test_func_reject_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|rejectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "jane"
    def test_json_dump(self):
        env = Environment(autoescape=True)
        t = env.from_string("{{ x|tojson }}")
        # With autoescape on, tojson must escape HTML-significant
        # characters (&, ', <, >) as \uXXXX sequences.
        assert t.render(x={"foo": "bar"}) == '{"foo": "bar"}'
        assert t.render(x="\"ba&r'") == r'"\"ba\u0026r\u0027"'
        assert t.render(x="<bar>") == r'"\u003cbar\u003e"'

        def my_dumps(value, **options):
            # The policy kwargs must be forwarded verbatim to the dumper.
            assert options == {"foo": "bar"}
            return "42"

        # A custom dump function and its kwargs can be installed via
        # environment policies; its return value is used as-is.
        env.policies["json.dumps_function"] = my_dumps
        env.policies["json.dumps_kwargs"] = {"foo": "bar"}
        assert t.render(x=23) == "42"
def test_wordwrap(self, env):
env.newline_sequence = "\n"
t = env.from_string("{{ s|wordwrap(20) }}")
result = t.render(s="Hello!\nThis is Jinja saying something.")
assert result == "Hello!\nThis is Jinja saying\nsomething."
def test_filter_undefined(self, env):
with pytest.raises(TemplateAssertionError, match="No filter named 'f'"):
env.from_string("{{ var|f }}")
def test_filter_undefined_in_if(self, env):
t = env.from_string("{%- if x is defined -%}{{ x|f }}{%- else -%}x{% endif %}")
assert t.render() == "x"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_elif(self, env):
t = env.from_string(
"{%- if x is defined -%}{{ x }}{%- elif y is defined -%}"
"{{ y|f }}{%- else -%}foo{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(y=42)
def test_filter_undefined_in_else(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{{ x|f }}{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_nested_if(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{%- if y "
"is defined -%}{{ y|f }}{%- endif -%}{{ x }}{%- endif -%}"
)
assert t.render() == "foo"
assert t.render(x=42) == "42"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=24, y=42)
def test_filter_undefined_in_condexpr(self, env):
t1 = env.from_string("{{ x|f if x is defined else 'foo' }}")
t2 = env.from_string("{{ 'foo' if x is not defined else x|f }}")
assert t1.render() == t2.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t1.render(x=42)
t2.render(x=42)
|
|
from datetime import timedelta
from decimal import Decimal
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from .interfaces import AbstractPaymentInterface
from .exceptions import InvalidPaymentInterfaceParentClass, InvalidSettingsEntryForPaymentMethodException, \
PaymentCannotBeCancelledByUserException, PaymentInterfaceHasNotBeenSetUpException, \
SettingsEntryForPaymentMethodDoesNotExistException
MONEY_AMOUNT_MAX_DIGITS = 10
DEFAULT_PAYMENT_EXPIRY_PERIOD = timedelta(minutes=10)
class PaymentQuerySet(models.QuerySet):
    """Chainable queryset filters for common payment states."""

    def pending(self):
        # Payments in status PENDING (post-payment, per the status groups
        # declared on Payment).
        return self.filter(status=Payment.PENDING)

    def awaiting_for_payment(self):
        # Payments the user has not paid yet.
        return self.filter(status=Payment.AWAITING_FOR_PAYMENT)

    def expiry_date_in_past(self):
        # Payments with a non-null expiry timestamp that has already passed.
        # ~Q(expires_at=None) excludes payments that never expire.
        return self.filter(~Q(expires_at=None), expires_at__lte=timezone.now())
class PaymentManager(models.Manager):
    """Default manager exposing the custom PaymentQuerySet filters plus
    maintenance operations run by background tasks."""

    def get_queryset(self):
        # Use the custom queryset so its state filters can be chained.
        return PaymentQuerySet(self.model, using=self._db)

    def pending(self):
        return self.get_queryset().pending()

    def awaiting_for_payment(self):
        return self.get_queryset().awaiting_for_payment()

    def expiry_date_in_past(self):
        return self.get_queryset().expiry_date_in_past()

    def refresh_pending_payments(self, *args, limit=5, **kwargs):
        """Retrieves every pending payment from its payment service provider
        through the payment interface. Payment interface will update the
        instance.

        Only the `limit` least-recently-updated pending payments are
        refreshed per call; extra args/kwargs are forwarded to
        ``retrieve_payment``.
        """
        for payment in self.get_queryset().pending().order_by('updated_at')[:limit]:
            payment.retrieve_payment(*args, **kwargs)

    def expiry_awaiting_payments_with_expired_dates(self):
        """Find payments which are awaiting for payment and their expiry date
        is in the past, and then set the payment status to "EXPIRED".
        """
        # Single bulk UPDATE; model save() hooks are not invoked.
        self.get_queryset().awaiting_for_payment().expiry_date_in_past().update(status=Payment.EXPIRED)

    def create_payment(self, currency, amount, user=None, **kwargs):
        """Create a payment object."""
        return self.create(currency=currency, amount=amount, user=user, **kwargs)
class Payment(models.Model):
    """A single payment transaction.

    Tracks the lifecycle of a payment: it is created, a payment method is
    selected, the user checks out with the external payment service (driven by
    the pluggable payment interface of the chosen method), and the payment
    ends in a final state (paid / failed / expired / cancelled).
    """
    # Statuses assigned before user has paid
    CREATED = 1
    AWAITING_FOR_PAYMENT = 2
    CANCELLED_BY_USER = 3
    # Statuses assigned after user has paid
    CANCELLED = 10
    PENDING = 11
    FAILED = 12
    EXPIRED = 13
    PAID = 14
    # NOTE(review): the misspelled name "STATUS_CHOISES" is part of the public
    # interface of this class and is kept for backward compatibility.
    STATUS_CHOISES = (
        (CREATED, _('created')),
        (AWAITING_FOR_PAYMENT, _('awaiting for payment')),
        (CANCELLED_BY_USER, _('cancelled by user')),
        (CANCELLED, _('canceled')),
        (PENDING, _('pending')),
        (FAILED, _('failed')),
        (EXPIRED, _('expired')),
        (PAID, _('paid')),
    )
    payment_method = models.ForeignKey('PaymentMethod', verbose_name=_('payment method'), blank=True, null=True,
                                       related_name='payments')
    status = models.PositiveSmallIntegerField(_('status'), choices=STATUS_CHOISES, default=CREATED)
    # ISO 4217 style 3-letter code (max_length=3); not validated here.
    currency = models.CharField(_('currency'), max_length=3)
    amount = models.DecimalField(_('amount'), max_digits=MONEY_AMOUNT_MAX_DIGITS, decimal_places=2,
                                 validators=[MinValueValidator(Decimal(0.0))])
    expires_at = models.DateTimeField(_('expires'), blank=True, null=True)
    created_at = models.DateTimeField(_('created'), auto_now_add=True)
    updated_at = models.DateTimeField(_('updated'), auto_now=True)
    paid_at = models.DateTimeField(_('paid at'), blank=True, null=True)
    payment_details = models.TextField(_('payment details'), blank=True, null=True, default='{}', editable=False,
                                       help_text=_('Technical details. It shows payment details information received '
                                                   'from the external payment system.'))
    # BUG FIX: verbose_name was a bare parenthesised string ('security token'),
    # not wrapped in _(); wrap it so the label is translatable like every
    # other field on this model.
    security_token = models.UUIDField(verbose_name=_('security token'), editable=False, unique=True,
                                      help_text=_('Token used to authenticate the user.'))
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), blank=True, null=True,
                             related_name='payments', help_text=_('This payment can be accessed only by the selected '
                                                                  'user.'))
    objects = PaymentManager()
    # Lazily-built cache; see get_payment_interface().
    __payment_interface_instance = None

    class Meta:
        verbose_name = _('payment')
        verbose_name_plural = _('payments')

    def __str__(self):
        # NOTE(review): these return lazy translation proxies rather than
        # plain str -- confirm the targeted Django/Python versions accept
        # that from __str__.
        if not getattr(self, 'pk', None):
            return _('Payment (has not been saved yet)')
        return _('Payment ID %d') % (self.pk,)

    def __init__(self, *args, **kwargs):
        super(Payment, self).__init__(*args, **kwargs)
        # Save initial values for the later checks in clean().
        self._old_status = self.status
        self._old_payment_method = self.payment_method
        # Expire the payment when its expiry date has passed, a payment method
        # has already been selected, and the payment is not being processed by
        # the third-party. NOTE(review): saving from __init__ means loading a
        # stale payment has a write side effect.
        if self.pk \
                and not self.is_completed() \
                and self._if_expiry_date_has_been_expired() \
                and self.is_payment_method_selected():
            self.status = self.EXPIRED
            self.save()

    def clean(self):
        # Swapping payment method on a payment is not allowed. Once set it has
        # to be sticked to.
        if self._old_payment_method and self.payment_method != self._old_payment_method:
            raise ValidationError({'payment_method': _('You cannot change a payment method.')})
        # A payment without a payment method cannot have any status other than
        # CREATED. BUG FIX: corrected 'chaned' -> 'changed' in the message.
        if not self.is_created() and not self.is_payment_method_selected():
            raise ValidationError(
                {'status': _('Status cannot be changed if payment method has not already been selected.')}
            )

    def save(self, *args, **kwargs):
        # If payment is being processed by the third-party, then take off the
        # expiry date.
        if not self.is_created() and not self.is_awaiting_for_payment():
            self.expires_at = None
        # If payment method has been chosen and is not processed by
        # third-party, then set the expiry date. Every save bumps the expiry
        # time.
        if self.is_payment_method_selected() and not self.is_completed():
            self.expires_at = self.get_default_payment_expiry_date()
        # Set the security token when saving the instance for the first time.
        if not self.security_token:
            self.security_token = self.generate_unique_payment_security_token()
        super(Payment, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('payment-gateway', kwargs={'pk': self.pk, 'security_token': self.get_security_token_string()})

    def create_payment(self, *args, **kwargs):
        """Create the payment on the external service via the interface."""
        self.get_payment_interface().create_payment(*args, **kwargs)

    def create_or_retrieve_payment(self, *args, **kwargs):
        """Create the payment externally, or retrieve it if it already exists."""
        self.get_payment_interface().create_or_retrieve_payment(*args, **kwargs)

    def execute_payment(self, *args, **kwargs):
        """Execute payment after it's been created and user has completed the
        checkout process with the third-party payment service.

        It might not be necessary with every payment service."""
        self.get_payment_interface().execute_payment(*args, **kwargs)

    def retrieve_payment(self, *args, **kwargs):
        """Retrieve payment details from a third-party service and update this
        Payment instance.

        The instance update is handled by the payment interface."""
        self.get_payment_interface().retrieve_payment(*args, **kwargs)

    def cancel_by_user(self):
        """Cancel payment before it's processed by the third party.

        Called whenever the payment is cancelled by the user.

        Raises:
            PaymentCannotBeCancelledByUserException: if the payment already
                reached a completed state.
        """
        if not self.can_be_cancelled_by_user():
            raise PaymentCannotBeCancelledByUserException
        self.status = self.CANCELLED_BY_USER
        self.payment_details = None
        self.save()

    def reset_unique_payment_security_token(self):
        """Assign a new unique payment security token and persist it."""
        self.security_token = self.generate_unique_payment_security_token()
        self.save()

    def is_created(self):
        return self.status == self.CREATED

    def is_awaiting_for_payment(self):
        return self.status == self.AWAITING_FOR_PAYMENT

    def is_cancelled(self):
        return self.status == self.CANCELLED

    def is_pending(self):
        return self.status == self.PENDING

    def is_failed(self):
        return self.status == self.FAILED

    def is_expired(self):
        return self.status == self.EXPIRED

    def is_paid(self):
        return self.status == self.PAID

    def is_cancelled_by_user(self):
        return self.status == self.CANCELLED_BY_USER

    def is_payment_method_selected(self):
        return self.payment_method is not None

    def is_completed(self):
        """Everything the user could have done has been done.

        Note: CANCELLED (by the service) is intentionally not included here;
        only user-side terminal states count.
        """
        if self.is_paid() or self.is_pending() or self.is_expired() or self.is_cancelled_by_user() or self.is_failed():
            return True
        return False

    def can_be_cancelled_by_user(self):
        """Check if user can still cancel this payment."""
        if not self.is_completed():
            return True
        return False

    def get_status_explanation(self):
        """Get an explanation of the current payment status."""
        return self.get_particular_status_explanation(self.status)

    def _if_expiry_date_has_been_expired(self):
        """Check if the expiry date on the payment has passed."""
        if self.expires_at and self.expires_at < timezone.now():
            return True
        return False

    def get_security_token_string(self):
        """Get a hex string of the payment's security token, or None if the
        token has not been generated yet."""
        if not isinstance(self.security_token, uuid.UUID):
            return None
        return self.security_token.hex

    def get_payment_method(self):
        return self.payment_method

    def get_payment_interface(self):
        """Instantiate (once) and return the payment interface for this
        payment's method."""
        if not self.__payment_interface_instance:
            # Get the payment interface class from the associated payment
            # method object and instantiate it with this payment + its config.
            self.__payment_interface_instance = self.get_payment_method().get_payment_interface_class()(
                self,
                self.get_payment_method().get_payment_interface_config()
            )
        return self.__payment_interface_instance

    @staticmethod
    def get_particular_status_explanation(status):
        """Get an explanation of the payment status provided in the argument."""
        # BUG FIX: Payment.CANCELLED appeared twice in this mapping; the first
        # entry was silently overridden by the second. Only the more
        # descriptive message is kept. Also fixed 'successfuly' typo.
        return {
            Payment.CREATED: _('The transaction has been created.'),
            Payment.AWAITING_FOR_PAYMENT: _('The transaction is awaiting to be completed by user.'),
            Payment.CANCELLED: _('The transaction has been cancelled by the service or your bank.'),
            Payment.PENDING: _('The transaction is currently being processed by the service.'),
            Payment.FAILED: _('The transaction has failed.'),
            Payment.EXPIRED: _('The transaction has expired.'),
            Payment.PAID: _('The transaction has been successfully completed.'),
            Payment.CANCELLED_BY_USER: _('The transaction has been cancelled by user.')
        }.get(status, _('The transaction\'s status is unknown.'))

    @staticmethod
    def get_default_payment_expiry_date():
        """Calculate the default payment expiry time for the Payment model."""
        return timezone.now() + DEFAULT_PAYMENT_EXPIRY_PERIOD

    @staticmethod
    def generate_unique_payment_security_token():
        """Get a payment security token not yet used by any payment."""
        token = uuid.uuid4()
        # Collision is astronomically unlikely; retry recursively if it happens.
        if Payment.objects.filter(security_token=token).exists():
            return Payment.generate_unique_payment_security_token()
        return token
class PaymentMethodQuerySet(models.QuerySet):
    """Queryset helpers for payment methods."""

    def active(self):
        """Only payment methods enabled for new payments."""
        return self.filter(active=True)
class PaymentMethodManager(models.Manager):
    """Manager exposing the PaymentMethodQuerySet helpers."""

    def get_queryset(self):
        return PaymentMethodQuerySet(self.model, using=self._db)

    def active(self):
        """Payment methods enabled for new payments.

        BUG FIX: the original built the filtered queryset and discarded it
        (implicitly returning None); the result is now returned.
        """
        return self.get_queryset().active()
class PaymentMethod(models.Model):
    """A configured way of paying (e.g. one external payment provider).

    Each method points at an entry in the ``PAYMENT_CONFIGS`` setting via
    ``config_title``; that entry supplies the dotted path of the payment
    interface class used to talk to the external payment service.
    """
    title = models.CharField(_('title'), max_length=30)
    active = models.BooleanField(_('active'), default=False, help_text=_('Tick it if you want to allow new payments '
                                                                        'to be made with this payment method.'))
    description = models.TextField(_('description'), blank=True)
    config_title = models.CharField(_('Config title'), max_length=100, help_text=_('Config title from '
                                                                                  '\'PAYMENT CONFIG\' setting.'))
    objects = PaymentMethodManager()
    # Lazily-populated caches. Assignments in the methods below create
    # per-instance attributes, so the class-level values are never mutated.
    __payment_interface_class = None
    __payment_interface_config = dict()

    class Meta:
        verbose_name = _('payment method')
        verbose_name_plural = _('payment methods')

    def __str__(self):
        return self.title

    def get_payment_interface_class(self):
        """Import the payment interface specified in settings and return its
        class.

        Raises:
            SettingsEntryForPaymentMethodDoesNotExistException: no settings
                entry matches ``config_title``.
            InvalidSettingsEntryForPaymentMethodException: entry lacks
                'INTERFACE_CLASS_PATH'.
            ImportError: the dotted path cannot be imported.
            InvalidPaymentInterfaceParentClass: class does not inherit from
                AbstractPaymentInterface.
            PaymentInterfaceHasNotBeenSetUpException: class-level is_setup()
                check failed.
        """
        # Load (and validate) the payment interface configuration first.
        self.__load_payment_interface_config()
        # If the payment interface's class has already been imported, reuse it.
        if self.__payment_interface_class:
            return self.__payment_interface_class
        # Get the payment interface class path from the config and import it.
        interface_class_path = self.get_payment_interface_config()['INTERFACE_CLASS_PATH']
        payment_interface_class = import_string(interface_class_path)
        # The class must inherit from AbstractPaymentInterface.
        if not issubclass(payment_interface_class, AbstractPaymentInterface):
            raise InvalidPaymentInterfaceParentClass('Method: %s, class path: %s' % (str(self), interface_class_path))
        # Run the payment interface's own internal setup check.
        if not payment_interface_class.is_setup():
            raise PaymentInterfaceHasNotBeenSetUpException
        self.__payment_interface_class = payment_interface_class
        return self.__payment_interface_class

    def is_payment_interface_setup(self):
        """Check if the payment interface can be imported and is set up.

        BUG FIX: the original also had an ``except
        PaymentInterfaceNotSpecifiedException`` clause, but that name is never
        imported in this module, so evaluating the clause raised NameError
        instead of returning False. The clause has been dropped; all the
        known failure modes of get_payment_interface_class() are covered by
        the tuple below.
        """
        try:
            self.get_payment_interface_class()
        except (ImportError,
                InvalidPaymentInterfaceParentClass,
                PaymentInterfaceHasNotBeenSetUpException,
                SettingsEntryForPaymentMethodDoesNotExistException,
                InvalidSettingsEntryForPaymentMethodException):
            return False
        return True

    def get_payment_interface_config(self):
        """Return the cached config dict (empty until loaded)."""
        return self.__payment_interface_config

    def __load_payment_interface_config(self):
        """Load and validate this method's entry from settings.PAYMENT_CONFIGS."""
        if not self.__payment_interface_config:
            payment_config = settings.PAYMENT_CONFIGS.get(self.config_title, None)
            if not payment_config:
                err_msg = 'Method: \'%s\', config title: \'%s\'' % (str(self), self.config_title)
                raise SettingsEntryForPaymentMethodDoesNotExistException(err_msg)
            if not payment_config.get('INTERFACE_CLASS_PATH', None):
                raise InvalidSettingsEntryForPaymentMethodException(
                    'Settings entry \'%s\' has a missing key: \'INTERFACE_CLASS_PATH\'.' % (self.config_title,)
                )
            self.__payment_interface_config = payment_config
        return self.__payment_interface_config
|
|
import json
import os
import urllib2
from sendgrid.helpers.mail import *
from sendgrid import *
# NOTE: you will need move this file to the root
# directory of this project to execute properly.
def build_hello_email():
    """Minimum required to send an email"""
    message = Mail(
        Email("test@example.com"),
        "Hello World from the SendGrid Python Library",
        Email("test@example.com"),
        Content("text/plain", "some text here"),
    )
    # Add a second recipient to the personalization Mail() created implicitly.
    message.personalizations[0].add_to(Email("test2@example.com"))
    return message.get()
def build_personalization(personalization):
    """Build a Personalization instance from a mock dict.

    BUG FIX: the original called ``add_to``/``add_bc``/``add_header``/... on
    the *input dict* instead of the new Personalization object (dicts have no
    such methods, so the function crashed), added CC recipients via ``add_to``,
    and misspelled ``add_bcc`` as ``add_bc``. All calls now target
    ``mock_personalization`` with the correct method per recipient type.
    """
    mock_personalization = Personalization()
    for to_addr in personalization['to_list']:
        mock_personalization.add_to(to_addr)
    for cc_addr in personalization['cc_list']:
        mock_personalization.add_cc(cc_addr)
    for bcc_addr in personalization['bcc_list']:
        mock_personalization.add_bcc(bcc_addr)
    for header in personalization['headers']:
        mock_personalization.add_header(header)
    for substitution in personalization['substitutions']:
        mock_personalization.add_substitution(substitution)
    for arg in personalization['custom_args']:
        mock_personalization.add_custom_arg(arg)
    mock_personalization.subject = personalization['subject']
    mock_personalization.send_at = personalization['send_at']
    return mock_personalization
def get_mock_personalization_dict():
    """Return a dict of mock values used to build a Personalization."""
    return {
        'to_list': [Email("test1@example.com", "Example User"),
                    Email("test2@example.com", "Example User")],
        'cc_list': [Email("test3@example.com", "Example User"),
                    Email("test4@example.com", "Example User")],
        'bcc_list': [Email("test5@example.com"),
                     Email("test6@example.com")],
        'subject': ("Hello World from the Personalized "
                    "SendGrid Python Library"),
        'headers': [Header("X-Test", "test"),
                    Header("X-Mock", "true")],
        'substitutions': [Substitution("%name%", "Example User"),
                          Substitution("%city%", "Denver")],
        'custom_args': [CustomArg("user_id", "343"),
                        CustomArg("type", "marketing")],
        'send_at': 1443636843,
    }
def build_attachment1():
    """Build a mock PDF attachment (base64-encoded content)."""
    pdf = Attachment()
    pdf.content = ("TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl"
                   "Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12")
    pdf.type = "application/pdf"
    pdf.filename = "balance_001.pdf"
    pdf.disposition = "attachment"
    pdf.content_id = "Balance Sheet"
    return pdf
def build_attachment2():
    """Build a mock inline image attachment."""
    banner = Attachment()
    banner.content = "BwdW"
    banner.type = "image/png"
    banner.filename = "banner.png"
    banner.disposition = "inline"
    banner.content_id = "Banner"
    return banner
def build_mail_settings():
    """Build mail settings mock."""
    footer_html = ("<html><body>Footer "
                   "Text</body></html>")
    mail_settings = MailSettings()
    mail_settings.bcc_settings = BCCSettings(True, Email("test@example.com"))
    mail_settings.bypass_list_management = BypassListManagement(True)
    mail_settings.footer_settings = FooterSettings(True, "Footer Text", footer_html)
    mail_settings.sandbox_mode = SandBoxMode(True)
    mail_settings.spam_check = SpamCheck(True, 1, "https://spamcatcher.sendgrid.com")
    return mail_settings
def build_tracking_settings():
    """Build tracking settings mock."""
    open_tag = ("Optional tag to "
                "replace with the"
                "open image in the "
                "body of the message")
    sub_text = ("text to insert into the "
                "text/plain portion of the"
                " message")
    sub_html = ("<html><body>html to insert "
                "into the text/html portion of "
                "the message</body></html>")
    sub_tag = ("Optional tag to replace with "
               "the open image in the body of "
               "the message")
    tracking_settings = TrackingSettings()
    tracking_settings.click_tracking = ClickTracking(True, True)
    tracking_settings.open_tracking = OpenTracking(True, open_tag)
    tracking_settings.subscription_tracking = SubscriptionTracking(
        True, sub_text, sub_html, sub_tag)
    tracking_settings.ganalytics = Ganalytics(True, "some source",
                                              "some medium", "some term",
                                              "some_content", "some_campaign")
    return tracking_settings
def build_kitchen_sink():
    """All settings set"""
    mail = Mail()
    mail.from_email = Email("test@example.com", "Example User")
    mail.subject = "Hello World from the SendGrid Python Library"
    # The same mock dict backs both personalizations.
    pers_dict = get_mock_personalization_dict()
    for _ in range(2):
        mail.add_personalization(build_personalization(pers_dict))
    for content in (Content("text/plain", "some text here"),
                    Content("text/html", ("<html><body>some text "
                                          "here</body></html>"))):
        mail.add_content(content)
    for attachment in (build_attachment1(), build_attachment2()):
        mail.add_attachment(attachment)
    mail.template_id = "13b8f94f-bcae-4ec6-b752-70d6cb59f932"
    for section in (Section("%section1%", "Substitution Text for Section 1"),
                    Section("%section2%", "Substitution Text for Section 2")):
        mail.add_section(section)
    for header in (Header("X-Test1", "test1"), Header("X-Test3", "test2")):
        mail.add_header(header)
    for category in (Category("May"), Category("2016")):
        mail.add_category(category)
    for arg in (CustomArg("campaign", "welcome"),
                CustomArg("weekday", "morning")):
        mail.add_custom_arg(arg)
    mail.send_at = 1443636842
    # This must be a valid [batch ID]
    # (https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) to work
    # mail.set_batch_id("N2VkYjBjYWItMGU4OC0xMWU2LWJhMzYtZjQ1Yzg5OTBkNzkxLWM5ZTUyZjNhOA")
    mail.asm = ASM(99, [4, 5, 6, 7, 8])
    mail.ip_pool_name = "24"
    mail.mail_settings = build_mail_settings()
    mail.tracking_settings = build_tracking_settings()
    mail.reply_to = Email("test@example.com")
    return mail.get()
def send_hello_email():
    """Send the minimal hello-world email and print the API response."""
    # Assumes you set your environment variable:
    # https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
    client = SendGridAPIClient()
    response = client.client.mail.send.post(request_body=build_hello_email())
    for part in (response.status_code, response.headers, response.body):
        print(part)
def send_kitchen_sink():
    """Send the everything-enabled email and print the API response."""
    # Assumes you set your environment variable:
    # https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
    client = SendGridAPIClient()
    response = client.client.mail.send.post(request_body=build_kitchen_sink())
    for part in (response.status_code, response.headers, response.body):
        print(part)
# NOTE(review): these calls run at import time and hit the live SendGrid API;
# consider an `if __name__ == "__main__":` guard before reusing this module.
# this will actually send an email
send_hello_email()
# this will only send an email if you set SandBox Mode to False
send_kitchen_sink()
|
|
"""Algorithms for partial fraction decomposition of rational functions. """
from __future__ import print_function, division
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.polys.polyerrors import PolynomialError
from sympy.core import S, Add, sympify, Function, Lambda, Dummy
from sympy.core.basic import preorder_traversal
from sympy.utilities import numbered_symbols, take, xthreaded, public
from sympy.core.compatibility import range
@xthreaded
@public
def apart(f, x=None, full=False, **options):
    """
    Compute partial fraction decomposition of a rational function.
    Given a rational function ``f``, computes the partial fraction
    decomposition of ``f``. Two algorithms are available: One is based on the
    undetermined coefficients method, the other is Bronstein's full partial
    fraction decomposition algorithm.
    The undetermined coefficients method (selected by ``full=False``) uses
    polynomial factorization (and therefore accepts the same options as
    factor) for the denominator. Per default it works over the rational
    numbers, therefore decomposition of denominators with non-rational roots
    (e.g. irrational, complex roots) is not supported by default (see options
    of factor).
    Bronstein's algorithm can be selected by using ``full=True`` and allows a
    decomposition of denominators with non-rational roots. A human-readable
    result can be obtained via ``doit()`` (see examples below).
    Examples
    ========
    >>> from sympy.polys.partfrac import apart
    >>> from sympy.abc import x, y
    By default, using the undetermined coefficients method:
    >>> apart(y/(x + 2)/(x + 1), x)
    -y/(x + 2) + y/(x + 1)
    The undetermined coefficients method does not provide a result when the
    denominators roots are not rational:
    >>> apart(y/(x**2 + x + 1), x)
    y/(x**2 + x + 1)
    You can choose Bronstein's algorithm by setting ``full=True``:
    >>> apart(y/(x**2 + x + 1), x, full=True)
    RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))
    Calling ``doit()`` yields a human-readable result:
    >>> apart(y/(x**2 + x + 1), x, full=True).doit()
    (-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
    2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)
    See Also
    ========
    apart_list, assemble_partfrac_list
    """
    allowed_flags(options, [])
    f = sympify(f)
    # Atoms (symbols, numbers) are their own decomposition.
    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()
    # Keep a pristine copy of the caller's options: the recursive calls below
    # must not see the defaults injected for this top-level invocation.
    _options = options.copy()
    options = set_defaults(options, extension=True)
    try:
        (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    except PolynomialError as msg:
        if f.is_commutative:
            raise PolynomialError(msg)
        # non-commutative expression: decompose the commutative parts and
        # recurse into non-commutative factors/terms piecewise.
        if f.is_Mul:
            c, nc = f.args_cnc(split_1=False)
            nc = f.func(*[apart(i, x=x, full=full, **_options) for i in nc])
            if c:
                c = apart(f.func._from_args(c), x=x, full=full, **_options)
                return c*nc
            else:
                return nc
        elif f.is_Add:
            # Decompose the commutative terms together; map over the rest.
            c = []
            nc = []
            for i in f.args:
                if i.is_commutative:
                    c.append(i)
                else:
                    try:
                        nc.append(apart(i, x=x, full=full, **_options))
                    except NotImplementedError:
                        nc.append(i)
            return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
        else:
            # Fall back to rewriting any decomposable subexpressions in place.
            reps = []
            pot = preorder_traversal(f)
            next(pot)
            for e in pot:
                try:
                    reps.append((e, apart(e, x=x, full=full, **_options)))
                    pot.skip() # this was handled successfully
                except NotImplementedError:
                    pass
            return f.xreplace(dict(reps))
    if P.is_multivariate:
        # A multivariate numerator/denominator may still cancel down to a
        # univariate problem; only then can we proceed.
        fc = f.cancel()
        if fc != f:
            return apart(fc, x=x, full=full, **_options)
        raise NotImplementedError(
            "multivariate partial fraction decomposition")
    common, P, Q = P.cancel(Q)
    # Split off the polynomial part; P/Q is then a proper rational function.
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)
    if Q.degree() <= 1:
        # Nothing to decompose for a (at most) linear denominator.
        partial = P/Q
    else:
        if not full:
            partial = apart_undetermined_coeffs(P, Q)
        else:
            partial = apart_full_decomposition(P, Q)
    terms = S.Zero
    for term in Add.make_args(partial):
        if term.has(RootSum):
            terms += term
        else:
            terms += factor(term)
    return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
    """Partial fractions via method of undetermined coefficients. """
    # Generator of fresh Dummy symbols used as the unknown coefficients.
    X = numbered_symbols(cls=Dummy)
    partial, symbols = [], []
    _, factors = Q.factor_list()
    for f, k in factors:
        # For each irreducible factor f of multiplicity k, introduce one
        # unknown numerator (with deg < deg f) per power f**i, i = 1..k.
        n, q = f.degree(), Q
        for i in range(1, k + 1):
            coeffs, q = take(X, n), q.quo(f)
            partial.append((coeffs, q, f, i))
            symbols.extend(coeffs)
    # Work over Q's coefficient domain extended by the unknown coefficients.
    dom = Q.get_domain().inject(*symbols)
    F = Poly(0, Q.gen, domain=dom)
    # Accumulate the ansatz numerators times their cofactors into F, which
    # must equal P coefficient-by-coefficient.
    for i, (coeffs, q, f, k) in enumerate(partial):
        h = Poly(coeffs, Q.gen, domain=dom)
        partial[i] = (h, f, k)
        q = q.set_domain(dom)
        F += h*q
    # Matching coefficients of F against P yields a linear system.
    system, result = [], S(0)
    for (k,), coeff in F.terms():
        system.append(coeff - P.nth(k))
    # Imported here (not at module level) to avoid a circular import.
    from sympy.solvers import solve
    solution = solve(system, symbols)
    # Substitute the solved coefficients back into the ansatz fractions.
    for h, f, k in partial:
        h = h.as_expr().subs(solution)
        result += h/f.as_expr()**k
    return result
def apart_full_decomposition(P, Q):
    """
    Bronstein's full partial fraction decomposition algorithm.
    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.
    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.
    References
    ==========
    1. [Bronstein93]_
    """
    # Delegate to the structured variant and reassemble its output.
    rational_function = P/Q
    gen = P.gens[0]
    structured = apart_list(rational_function, gen)
    return assemble_partfrac_list(structured)
@public
def apart_list(f, x=None, dummies=None, **options):
    """
    Compute partial fraction decomposition of a rational function
    and return the result in structured form.
    Given a rational function ``f`` compute the partial fraction decomposition
    of ``f``. Only Bronstein's full partial fraction decomposition algorithm
    is supported by this method. The return value is highly structured and
    perfectly suited for further algorithmic treatment rather than being
    human-readable. The function returns a tuple holding three elements:
    * The first item is the common coefficient, free of the variable `x` used
    for decomposition. (It is an element of the base field `K`.)
    * The second item is the polynomial part of the decomposition. This can be
    the zero polynomial. (It is an element of `K[x]`.)
    * The third part itself is a list of quadruples. Each quadruple
    has the following elements in this order:
    - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
    in the linear denominator of a bunch of related fraction terms. (This item
    can also be a list of explicit roots. However, at the moment ``apart_list``
    never returns a result this way, but the related ``assemble_partfrac_list``
    function accepts this format as input.)
    - The numerator of the fraction, written as a function of the root `w`
    - The linear denominator of the fraction *excluding its power exponent*,
    written as a function of the root `w`.
    - The power to which the denominator has to be raised.
    On can always rebuild a plain expression by using the function ``assemble_partfrac_list``.
    Examples
    ========
    A first example:
    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, t
    >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(2*x + 4, x, domain='ZZ'),
    [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])
    >>> assemble_partfrac_list(pfd)
    2*x + 4 + 4/(x - 1)
    Second example:
    >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    >>> pfd = apart_list(f)
    >>> pfd
    (-1,
    Poly(2/3, x, domain='QQ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])
    >>> assemble_partfrac_list(pfd)
    -2/3 - 2/(x - 2)
    Another example, showing symbolic parameters:
    >>> pfd = apart_list(t/(x**2 + x + t), x)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ[t]'),
    [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
    Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
    Lambda(_a, -_a + x),
    1)])
    >>> assemble_partfrac_list(pfd)
    RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))
    This example is taken from Bronstein's original paper:
    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
    See also
    ========
    apart, assemble_partfrac_list
    References
    ==========
    1. [Bronstein93]_
    """
    allowed_flags(options, [])
    f = sympify(f)
    # Atoms (symbols, numbers) are their own decomposition.
    if f.is_Atom:
        return f
    else:
        P, Q = f.as_numer_denom()
    options = set_defaults(options, extension=True)
    (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
    if P.is_multivariate:
        raise NotImplementedError(
            "multivariate partial fraction decomposition")
    common, P, Q = P.cancel(Q)
    # Split off the polynomial part; P/Q is then a proper rational function.
    poly, P = P.div(Q, auto=True)
    P, Q = P.rat_clear_denoms(Q)
    polypart = poly
    if dummies is None:
        # Default dummy generator: reuse one Dummy named "w" forever, so all
        # quadruples share the same root symbol.
        def dummies(name):
            d = Dummy(name)
            while True:
                yield d
        dummies = dummies("w")
    rationalpart = apart_list_full_decomposition(P, Q, dummies)
    return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.
    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.
    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.
    References
    ==========
    1. [Bronstein93]_
    """
    f, x, U = P/Q, P.gen, []
    # u is a placeholder for the square-free factor; its derivatives are
    # substituted away below.
    u = Function('u')(x)
    a = Dummy('a')
    partial = []
    # Iterate over the square-free factors d of Q with multiplicity n.
    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [ u.diff(x, n - 1) ]
        h = cancel(f*b**n) / u**n
        H, subs = [h], []
        # H[j] holds the j-th derivative of h divided by j! (Taylor-style).
        for j in range(1, n):
            H += [ H[-1].diff(x) / j ]
        for j in range(1, n + 1):
            subs += [ (U[j - 1], b.diff(x, j) / j) ]
        for j in range(0, n):
            # NOTE: P and Q are deliberately rebound here; the originals are
            # no longer needed once f has been formed.
            P, Q = cancel(H[j]).as_numer_denom()
            for i in range(0, j + 1):
                P = P.subs(*subs[j - i])
            Q = Q.subs(*subs[0])
            P = Poly(P, x)
            Q = Poly(Q, x)
            G = P.gcd(d)
            D = d.quo(G)
            # Invert Q modulo D via the half extended Euclidean algorithm,
            # then reduce the numerator modulo D.
            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)
            # Express the quadruple in terms of a fresh root symbol w.
            Dw = D.subs(x, next(dummygen))
            numer = Lambda(a, b.as_expr().subs(x, a))
            denom = Lambda(a, (x - a))
            exponent = n-j
            partial.append((Dw, numer, denom, exponent))
    return partial
@public
def assemble_partfrac_list(partial_list):
    r"""Reassemble a full partial fraction decomposition
    from a structured result obtained by the function ``apart_list``.
    Examples
    ========
    This example is taken from Bronstein's original paper:
    >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
    >>> from sympy.abc import x, y
    >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    >>> pfd = apart_list(f)
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
    (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
    (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
    >>> assemble_partfrac_list(pfd)
    -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
    If we happen to know some roots we can provide them easily inside the structure:
    >>> pfd = apart_list(2/(x**2-2))
    >>> pfd
    (1,
    Poly(0, x, domain='ZZ'),
    [(Poly(_w**2 - 2, _w, domain='ZZ'),
    Lambda(_a, _a/2),
    Lambda(_a, -_a + x),
    1)])
    >>> pfda = assemble_partfrac_list(pfd)
    >>> pfda
    RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2
    >>> pfda.doit()
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
    >>> from sympy import Dummy, Poly, Lambda, sqrt
    >>> a = Dummy("a")
    >>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])
    >>> assemble_partfrac_list(pfd)
    -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
    See also
    ========
    apart, apart_list
    """
    # Common factor
    common = partial_list[0]
    # Polynomial part
    polypart = partial_list[1]
    pfd = polypart.as_expr()
    # Rational parts: quadruples of (roots, numerator, denominator, exponent)
    for r, nf, df, ex in partial_list[2]:
        if isinstance(r, Poly):
            # Assemble in case the roots are given implicitly by a polynomial
            an, nu = nf.variables, nf.expr
            ad, de = df.variables, df.expr
            # Hack to make dummies equal because Lambda created new Dummies
            de = de.subs(ad[0], an[0])
            func = Lambda(an, nu/de**ex)
            pfd += RootSum(r, func, auto=False, quadratic=False)
        else:
            # Assemble in case the roots are given explicitly by a list of
            # algebraic numbers
            for root in r:
                pfd += nf(root)/df(root)**ex
    return common*pfd
|
|
from __future__ import absolute_import
import logging
from uuid import uuid4
from django.apps import apps
from django.db import transaction
from django.utils import timezone
from sentry.constants import ObjectStatus
from sentry.exceptions import DeleteAborted
from sentry.signals import pending_delete
from sentry.tasks.base import instrumented_task, retry, track_group_async_operation
logger = logging.getLogger(__name__)
MAX_RETRIES = 5
@instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
def run_scheduled_deletions():
    """Kick off a run_deletion task for every deletion that is due."""
    from sentry.models import ScheduledDeletion

    due = ScheduledDeletion.objects.filter(
        in_progress=False, aborted=False, date_scheduled__lte=timezone.now()
    )
    for deletion in due:
        with transaction.atomic():
            # Atomically claim the row; another worker may have raced us, in
            # which case the UPDATE matches nothing and we skip the item.
            claimed = ScheduledDeletion.objects.filter(
                id=deletion.id, in_progress=False, aborted=False
            ).update(in_progress=True)
            if claimed:
                run_deletion.delay(deletion_id=deletion.id)
@instrumented_task(
    name="sentry.tasks.deletion.run_deletion",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def run_deletion(deletion_id):
    """Process one chunk of a ScheduledDeletion, re-queueing itself until done.

    Raises:
        DeleteAborted: if the deletion was aborted; excluded from retries,
            so raising it terminates the task permanently.
    """
    from sentry import deletions
    from sentry.models import ScheduledDeletion

    try:
        deletion = ScheduledDeletion.objects.get(id=deletion_id)
    except ScheduledDeletion.DoesNotExist:
        # Bookkeeping row already removed -- nothing left to do.
        return

    if deletion.aborted:
        raise DeleteAborted

    if not deletion.in_progress:
        # First pass for this deletion: mark it in progress and notify
        # listeners before any data is actually removed.
        actor = deletion.get_actor()
        instance = deletion.get_instance()
        with transaction.atomic():
            deletion.update(in_progress=True)
            pending_delete.send(sender=type(instance), instance=instance, actor=actor)

    task = deletions.get(
        model=deletion.get_model(),
        query={"id": deletion.object_id},
        transaction_id=deletion.guid,
        actor_id=deletion.actor_id,
    )
    has_more = task.chunk()
    if has_more:
        # More rows remain: continue in a follow-up task and keep the
        # ScheduledDeletion row so the follow-up can find it.
        run_deletion.apply_async(kwargs={"deletion_id": deletion_id}, countdown=15)
    else:
        # Bug fix: only drop the bookkeeping row once all chunks are done.
        # Deleting it unconditionally orphaned the re-queued task above
        # (its .get() would hit DoesNotExist and return without finishing).
        deletion.delete()
@instrumented_task(
    name="sentry.tasks.deletion.revoke_api_tokens",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):
    """Delete API tokens for an application in batches of up to 1000 rows.

    Re-queues itself whenever a batch was non-empty; returns whether another
    pass was scheduled.
    """
    from sentry.models import ApiToken

    tokens = ApiToken.objects.filter(application=object_id)
    if timestamp:
        tokens = tokens.filter(date_added__lte=timestamp)

    # we're using a slow deletion strategy to avoid a lot of custom code for
    # postgres
    batch = list(tokens[:1000])
    for token in batch:
        token.delete()
    has_more = bool(batch)

    if has_more:
        revoke_api_tokens.apply_async(
            kwargs={
                "object_id": object_id,
                "transaction_id": transaction_id,
                "timestamp": timestamp,
            },
            countdown=15,
        )
    return has_more
@instrumented_task(
    name="sentry.tasks.deletion.delete_organization",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):
    """Delete an Organization in chunks, re-queueing itself until finished.

    Raises:
        DeleteAborted: if the organization is still visible (excluded from
            retries, so this permanently stops the task).
    """
    from sentry import deletions
    from sentry.models import Organization, OrganizationStatus

    try:
        instance = Organization.objects.get(id=object_id)
    except Organization.DoesNotExist:
        # Already gone -- nothing to do.
        return

    if instance.status == OrganizationStatus.VISIBLE:
        # Never delete a live organization.
        raise DeleteAborted

    # compat: can be removed after we switch to scheduled deletions
    if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:
        pending_delete.send(sender=type(instance), instance=instance)

    task = deletions.get(
        model=Organization,
        query={"id": object_id},
        transaction_id=transaction_id or uuid4().hex,
        actor_id=actor_id,
    )
    has_more = task.chunk()
    if has_more:
        # Continue in a follow-up task to bound the runtime of each pass.
        delete_organization.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
            countdown=15,
        )
@instrumented_task(
    name="sentry.tasks.deletion.delete_team",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_team(object_id, transaction_id=None, **kwargs):
    """Chunked deletion of a Team; re-queues itself while rows remain."""
    from sentry import deletions
    from sentry.models import Team, TeamStatus

    try:
        team = Team.objects.get(id=object_id)
    except Team.DoesNotExist:
        return

    # A team that is still visible must never be deleted; abort permanently.
    if team.status == TeamStatus.VISIBLE:
        raise DeleteAborted

    task = deletions.get(
        model=Team, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
    )
    if task.chunk():
        # More rows remain -- continue in a fresh task invocation.
        delete_team.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
        )
@instrumented_task(
    name="sentry.tasks.deletion.delete_project",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_project(object_id, transaction_id=None, **kwargs):
    """Chunked deletion of a Project; re-queues itself while rows remain."""
    from sentry import deletions
    from sentry.models import Project, ProjectStatus

    try:
        project = Project.objects.get(id=object_id)
    except Project.DoesNotExist:
        return

    # A project that is still visible must never be deleted; abort permanently.
    if project.status == ProjectStatus.VISIBLE:
        raise DeleteAborted

    task = deletions.get(
        model=Project, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
    )
    if task.chunk():
        # More rows remain -- continue in a fresh task invocation.
        delete_project.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
        )
@instrumented_task(
    name="sentry.tasks.deletion.delete_groups",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
@track_group_async_operation
def delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):
    """Delete Groups in batches of 100, re-queueing until every id is processed."""
    from sentry import deletions, eventstream
    from sentry.models import Group

    transaction_id = transaction_id or uuid4().hex
    max_batch_size = 100
    current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]

    task = deletions.get(
        model=Group, query={"id__in": current_batch}, transaction_id=transaction_id
    )
    has_more = task.chunk()
    if has_more or rest:
        # If the current batch is unfinished, retry with the full id list;
        # otherwise continue with only the remaining ids.
        delete_groups.apply_async(
            kwargs={
                "object_ids": object_ids if has_more else rest,
                "transaction_id": transaction_id,
                "eventstream_state": eventstream_state,
            },
            countdown=15,
        )
    else:
        # all groups have been deleted
        if eventstream_state:
            eventstream.end_delete_groups(eventstream_state)
@instrumented_task(
    name="sentry.tasks.deletion.delete_api_application",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_api_application(object_id, transaction_id=None, **kwargs):
    """Chunked deletion of an ApiApplication; re-queues while rows remain."""
    from sentry import deletions
    from sentry.models import ApiApplication, ApiApplicationStatus

    try:
        application = ApiApplication.objects.get(id=object_id)
    except ApiApplication.DoesNotExist:
        return

    # An application that is still active must never be deleted.
    if application.status == ApiApplicationStatus.active:
        raise DeleteAborted

    task = deletions.get(
        model=ApiApplication, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
    )
    if task.chunk():
        # More rows remain -- continue in a fresh task invocation.
        delete_api_application.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
        )
@instrumented_task(
    name="sentry.tasks.deletion.generic_delete",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):
    """Chunked deletion for any model addressable by (app_label, model_name, id)."""
    from sentry import deletions
    from sentry.models import User

    model = apps.get_model(app_label, model_name)

    try:
        instance = model.objects.get(id=object_id)
    except model.DoesNotExist:
        return

    # NOTE(review): the pending_delete signal fires before the VISIBLE abort
    # check below, so listeners are notified even for deletions that then
    # abort -- confirm this ordering is intended.
    if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
        pending_delete.send(
            sender=type(instance),
            instance=instance,
            actor=User.objects.get(id=actor_id) if actor_id else None,
        )

    if instance.status == ObjectStatus.VISIBLE:
        # Never delete an object that is still visible.
        raise DeleteAborted

    task = deletions.get(
        model=model,
        actor_id=actor_id,
        query={"id": object_id},
        transaction_id=transaction_id or uuid4().hex,
    )
    has_more = task.chunk()
    if has_more:
        # Re-queue with identical arguments until all chunks are processed.
        generic_delete.apply_async(
            kwargs={
                "app_label": app_label,
                "model_name": model_name,
                "object_id": object_id,
                "transaction_id": transaction_id,
                "actor_id": actor_id,
            },
            countdown=15,
        )
@instrumented_task(
    name="sentry.tasks.deletion.delete_repository",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):
    """Chunked deletion of a Repository; aborts if the repo is still visible."""
    from sentry import deletions
    from sentry.models import Repository, User

    try:
        instance = Repository.objects.get(id=object_id)
    except Repository.DoesNotExist:
        # Nothing left to delete.
        return

    if instance.status == ObjectStatus.VISIBLE:
        # DeleteAborted is excluded from retries, so this stops permanently.
        raise DeleteAborted

    # compat: can be removed after we switch to scheduled deletions
    if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
        pending_delete.send(
            sender=type(instance),
            instance=instance,
            actor=User.objects.get(id=actor_id) if actor_id else None,
        )

    task = deletions.get(
        model=Repository,
        actor_id=actor_id,
        query={"id": object_id},
        transaction_id=transaction_id or uuid4().hex,
    )
    has_more = task.chunk()
    if has_more:
        # Continue deleting in a follow-up task to bound runtime.
        delete_repository.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
            countdown=15,
        )
@instrumented_task(
    name="sentry.tasks.deletion.delete_organization_integration",
    queue="cleanup",
    default_retry_delay=60 * 5,
    max_retries=MAX_RETRIES,
)
@retry(exclude=(DeleteAborted,))
def delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):
    """Delete an OrganizationIntegration and its attached default-auth Identity.

    Repositories that pointed at the integration are detached (integration_id
    cleared), not deleted.
    """
    from sentry import deletions
    from sentry.models import OrganizationIntegration, Repository, Identity

    try:
        instance = OrganizationIntegration.objects.get(id=object_id)
    except OrganizationIntegration.DoesNotExist:
        return

    if instance.status == ObjectStatus.VISIBLE:
        # Never delete a live integration; DeleteAborted skips retries.
        raise DeleteAborted

    # dissociate repos from that integration
    Repository.objects.filter(
        organization_id=instance.organization_id, integration_id=instance.integration_id
    ).update(integration_id=None)

    # delete the identity attached through the default_auth_id
    if instance.default_auth_id:
        log_info = {
            "integration_id": instance.integration_id,
            "identity_id": instance.default_auth_id,
        }
        try:
            identity = Identity.objects.get(id=instance.default_auth_id)
        except Identity.DoesNotExist:
            # the identity may not exist for a variety of reasons but for debugging purposes
            # we should keep track
            logger.info("delete_organization_integration.identity_does_not_exist", extra=log_info)
        else:
            identity.delete()
            logger.info("delete_organization_integration.identity_deleted", extra=log_info)

    task = deletions.get(
        model=OrganizationIntegration,
        actor_id=actor_id,
        query={"id": object_id},
        transaction_id=transaction_id or uuid4().hex,
    )
    has_more = task.chunk()
    if has_more:
        # Re-queue until all chunks are processed.
        delete_organization_integration.apply_async(
            kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
            countdown=15,
        )
|
|
"""
Created: 11 November 2016
Last Updated: 16 February 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Base class for plotting deep learning
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT!
Instead, uses matplotlib to generate figures
"""
import os
import sys
import json
import util
from datetime import date
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family='sans-serif')
from keras.utils.vis_utils import plot_model as keras_plot
from sklearn.metrics import roc_curve, auc
import hepPlotter.hepPlotterLabels as hpl
import hepPlotter.hepPlotterTools as hpt
from hepPlotter.hepPlotter import HepPlotter
class Target(object):
    """Lightweight container describing one training target (e.g. signal)."""

    def __init__(self, name=""):
        """Set bookkeeping/plotting defaults for a target called *name*."""
        self.name = name          # identifier for this target, e.g. 'signal'
        self.df = None            # dataframe holding this target's features
        self.color = 'k'          # line/fill color used when plotting
        self.label = ''           # human-readable label for plot legends
        self.target_value = -999  # value of the 'target' column for this class
        self.binning = 1          # default histogram binning
class DeepLearningPlotter(object):
    """Plotting utilities for deep learning (features, predictions, ROC, loss)."""

    def __init__(self):
        """Give default values to member variables."""
        self.date = date.today().strftime('%d%b%Y')
        self.betterColors = hpt.betterColors()['linecolors']
        self.sample_labels = hpl.sample_labels()
        self.variable_labels = hpl.variable_labels()

        self.msg_svc = util.VERBOSE()
        self.filename = ""
        self.output_dir = ''
        self.image_format = 'png'
        self.process_label = ''      # if a single process is used for all training, set this
        self.processlabel = ''       # per-plot label; resolved in initialize(). Bug fix: was
                                     # previously unset here, so features() before initialize()
                                     # raised AttributeError.
        self.classification = False  # 'binary','multi',False
        self.regression = False      # True or False
        self.df = None
        self.targets = []
        self.CMSlabelStatus = "Internal"

    def initialize(self, dataframe, target_names=[], target_values=[]):
        """
        Set parameters of class to make plots.

        @param dataframe      The dataframe that contains physics information for training/testing
        @param target_names   Name(s) of the target class(es); list for classification
        @param target_values  Value(s) of the 'target' column matching target_names
        """
        self.df = dataframe

        try:
            self.processlabel = self.sample_labels[self.filename].label  # process used in each plot
        except KeyError:
            self.processlabel = ''

        if self.classification:
            for i, (n, v) in enumerate(zip(target_names, target_values)):
                tmp = Target(n)
                tmp.df = self.df.loc[self.df['target'] == v]
                tmp.target_value = v
                tmp.label = self.sample_labels[n].label
                tmp.color = self.betterColors[i]
                self.targets.append(tmp)
        else:  # regression
            try:
                tmp = Target(target_names[0])
                tmp.df = self.df.loc[self.df['target'] == target_values[0]]
                tmp.target_value = target_values[0]
            except TypeError:
                # scalar name/value rather than lists
                tmp = Target(target_names)
                tmp.df = self.df.loc[self.df['target'] == target_values]
                tmp.target_value = target_values
            tmp.label = self.sample_labels[tmp.name].label
            # Bug fix: this branch previously used self.betterColors[i], but
            # 'i' is undefined here (no enumerate loop) -> NameError.
            tmp.color = self.betterColors[0]
            self.targets.append(tmp)

        return

    def features(self):
        """
        Plot the features.
        For classification, compare different targets.
        For regression, just plot the features <- should do data/mc plots instead!
        """
        self.msg_svc.INFO("DL : Plotting features.")

        target0 = self.targets[0]  # hard-coded for binary comparisons
        target1 = self.targets[1]

        plt_features = self.df.keys()
        for hi, feature in enumerate(plt_features):
            if feature == 'target': continue

            binning = self.variable_labels[feature].binning

            hist = HepPlotter("histogram", 1)

            hist.normed = True
            hist.stacked = False
            hist.logplot = {"y": False, "x": False, "data": False}
            hist.binning = binning
            hist.x_label = self.variable_labels[feature].label
            hist.y_label = "Events"
            hist.format = self.image_format
            hist.saveAs = self.output_dir+"/hist_"+feature+"_"+self.date
            hist.ratio_plot = True
            hist.ratio_type = 'ratio'
            hist.y_ratio_label = '{0}/{1}'.format(target0.label, target1.label)
            hist.CMSlabel = 'top left'
            hist.CMSlabelStatus = self.CMSlabelStatus
            hist.numLegendColumns = 1

            # Add some extra text to the plot
            if self.processlabel: hist.extra_text.Add(self.processlabel, coords=[0.03, 0.80])  # physics process that produces these features

            hist.initialize()

            hist.Add(target0.df[feature], name=target0.name, draw='step',
                     linecolor=target0.color, label=target0.label,
                     ratio_num=True, ratio_den=False, ratio_partner=target1.name)
            hist.Add(target1.df[feature], name=target1.name, draw='step',
                     linecolor=target1.color, label=target1.label,
                     ratio_num=False, ratio_den=True, ratio_partner=target0.name)

            if self.classification == 'binary':
                # Quote the separation between the two class distributions
                t0, _ = np.histogram(target0.df[feature], bins=binning, normed=True)
                t1, _ = np.histogram(target1.df[feature], bins=binning, normed=True)
                separation = util.getSeparation(t0, t1)
                hist.extra_text.Add("Separation = {0:.4f}".format(separation), coords=[0.03, 0.73])

            p = hist.execute()
            hist.savefig()

        return

    def feature_correlations(self):
        """Plot correlations between features of the NN."""
        ## Correlation Matrices of Features (top/antitop) ##
        fontProperties = {'family': 'sans-serif'}
        opts = {'cmap': plt.get_cmap("bwr"), 'vmin': -1, 'vmax': +1}

        for c, target in enumerate(self.targets):
            saveAs = "{0}/correlations_{1}_{2}".format(self.output_dir, target.name, self.date)

            # Drop the label column; correlate only the features
            allkeys = target.df.keys()
            keys = []
            for key in allkeys:
                if key != 'target': keys.append(key)

            t_ = target.df[keys]
            corrmat = t_.corr()

            # Save correlation matrix to CSV file
            corrmat.to_csv("{0}.csv".format(saveAs))

            # Use matplotlib directly
            fig, ax = plt.subplots()
            heatmap1 = ax.pcolor(corrmat, **opts)
            cbar = plt.colorbar(heatmap1, ax=ax)

            cbar.ax.set_yticklabels([i.get_text().strip('$') for i in cbar.ax.get_yticklabels()], **fontProperties)

            labels = corrmat.columns.values
            labels = [i.replace('_', '\_') for i in labels]

            # shift location of ticks to center of the bins
            ax.set_xticks(np.arange(len(labels))+0.5, minor=False)
            ax.set_yticks(np.arange(len(labels))+0.5, minor=False)
            ax.set_xticklabels(labels, fontProperties, fontsize=18, minor=False, ha='right', rotation=70)
            ax.set_yticklabels(labels, fontProperties, fontsize=18, minor=False)

            ## CMS/COM Energy Label + Signal name
            cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
            cms_stamp.coords = [0.02, 1.00]
            cms_stamp.fontsize = 16
            cms_stamp.va = 'bottom'
            ax.text(0.02, 1.00, cms_stamp.text, fontsize=cms_stamp.fontsize,
                    ha=cms_stamp.ha, va=cms_stamp.va, transform=ax.transAxes)

            energy_stamp = hpl.EnergyStamp()
            energy_stamp.ha = 'right'
            energy_stamp.coords = [0.99, 1.00]
            energy_stamp.fontsize = 16
            energy_stamp.va = 'bottom'
            ax.text(energy_stamp.coords[0], energy_stamp.coords[1], energy_stamp.text,
                    fontsize=energy_stamp.fontsize, ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

            ax.text(0.03, 0.93, target.label, fontsize=16, ha='left', va='bottom', transform=ax.transAxes)

            plt.savefig("{0}.{1}".format(saveAs, self.image_format),
                        format=self.image_format, dpi=300, bbox_inches='tight')
            plt.close()

        return

    def prediction(self, train_data={}, test_data={}):
        """Plot the training and testing predictions.

        @param train_data  dict with 'X' (predictions) and 'Y' (truth) per k-fold
        @param test_data   dict with 'X' (predictions) and 'Y' (truth) per k-fold
        """
        self.msg_svc.INFO("DL : Plotting DNN prediction. ")

        # Plot all k-fold cross-validation results
        for i, (train, trainY, test, testY) in enumerate(zip(train_data['X'], train_data['Y'], test_data['X'], test_data['Y'])):
            hist = HepPlotter("histogram", 1)

            hist.ratio_plot = True
            hist.ratio_type = "ratio"
            hist.y_ratio_label = "Test/Train"
            hist.label_size = 14
            hist.normed = True  # compare shape differences (likely don't have the same event yield)
            hist.format = self.image_format
            hist.saveAs = "{0}/hist_DNN_prediction_kfold{1}_{2}".format(self.output_dir, i, self.date)
            hist.binning = [bb/10. for bb in range(11)]
            hist.stacked = False
            hist.logplot = {"y": False, "x": False, "data": False}
            hist.x_label = "Prediction"
            hist.y_label = "Arb. Units"
            hist.CMSlabel = 'top left'
            hist.CMSlabelStatus = self.CMSlabelStatus
            hist.numLegendColumns = 1

            if self.processlabel: hist.extra_text.Add(self.processlabel, coords=[0.03, 0.80], fontsize=14)

            hist.initialize()

            # Bug fix: these locals were previously named 'test_data'/'train_data',
            # shadowing the function arguments mid-loop.
            test_hists = []
            train_hists = []
            json_data = {}
            for t, target in enumerate(self.targets):
                ## Training
                target_value = target.target_value
                hist.Add(train[trainY == target_value],
                         name=target.name+'_train', linecolor=target.color,
                         linewidth=2, draw='step', label=target.label+" Train",
                         ratio_den=True, ratio_num=False, ratio_partner=target.name+'_test')
                ## Testing
                hist.Add(test[testY == target_value],
                         name=target.name+'_test', linecolor=target.color, color=target.color,
                         linewidth=0, draw='stepfilled', label=target.label+" Test", alpha=0.5,
                         ratio_den=False, ratio_num=True, ratio_partner=target.name+'_train')

                ## Save data to JSON file
                json_data[target.name+"_train"] = {}
                json_data[target.name+"_test"] = {}
                d_tr, b_tr = np.histogram(train[trainY == target_value], bins=hist.binning)
                d_te, b_te = np.histogram(test[testY == target_value], bins=hist.binning)
                json_data[target.name+"_train"]["binning"] = b_tr.tolist()
                json_data[target.name+"_train"]["content"] = d_tr.tolist()
                json_data[target.name+"_test"]["binning"] = b_te.tolist()
                json_data[target.name+"_test"]["content"] = d_te.tolist()
                test_hists.append(d_te.tolist())
                train_hists.append(d_tr.tolist())

            separation = util.getSeparation(test_hists[0], test_hists[1])
            hist.extra_text.Add("Test Separation = {0:.4f}".format(separation), coords=[0.03, 0.72])

            p = hist.execute()
            hist.savefig()

            # save results to JSON file (just histogram values & bins) to re-make plots
            with open("{0}.json".format(hist.saveAs), 'w') as outfile:
                json.dump(json_data, outfile)

        return

    def ROC(self, fprs=[], tprs=[], accuracy={}):
        """Plot the ROC curve & save to text file."""
        self.msg_svc.INFO("DL : Plotting ROC curve.")
        saveAs = "{0}/roc_curve_{1}".format(self.output_dir, self.date)

        ## Use matplotlib directly
        fig, ax = plt.subplots()

        # Draw all of the ROC curves from the K-fold cross-validation
        ax.plot([0, 1], [0, 1], ls='--', label='No Discrimination', lw=2, c='gray')
        ax.axhline(y=1, lw=1, c='lightgray', ls='--')
        for ft, (fpr, tpr) in enumerate(zip(fprs, tprs)):
            roc_auc = auc(fpr, tpr)
            ax.plot(fpr, tpr, label='K-fold {0} (AUC = {1:.2f})'.format(ft, roc_auc), lw=2)

            # save ROC curve to CSV file (to plot later)
            outfile_name = "{0}_{1}.csv".format(saveAs, ft)
            csv = ["{0},{1}".format(fp, tp) for fp, tp in zip(fpr, tpr)]
            util.to_csv(outfile_name, csv)

        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([0.0, 1.5])

        ax.set_xlabel(r'$\epsilon$(anti-top)', fontsize=22, ha='right', va='top', position=(1, 0))
        ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()], fontsize=22)

        ax.set_ylabel(r'$\epsilon$(top)', fontsize=22, ha='right', va='bottom', position=(0, 1))
        ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''], fontsize=22)

        ## CMS/COM Energy Label
        cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
        cms_stamp.coords = [0.03, 0.97]
        cms_stamp.fontsize = 16
        ax.text(cms_stamp.coords[0], cms_stamp.coords[1], cms_stamp.text, fontsize=cms_stamp.fontsize,
                ha=cms_stamp.ha, va=cms_stamp.va, transform=ax.transAxes)

        energy_stamp = hpl.EnergyStamp()
        energy_stamp.coords = [0.03, 0.90]
        energy_stamp.fontsize = 16
        ax.text(energy_stamp.coords[0], energy_stamp.coords[1], energy_stamp.text,
                fontsize=energy_stamp.fontsize, ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

        text_args = {'ha': 'left', 'va': 'top', 'fontsize': 18, 'transform': ax.transAxes}
        if self.processlabel: ax.text(0.03, 0.82, self.processlabel, **text_args)
        if accuracy: ax.text(0.03, 0.75, r"Accuracy = {0:.2f}$\pm${1:.2f}".format(accuracy['mean'], accuracy['std']), **text_args)

        leg = ax.legend(loc=4, numpoints=1, fontsize=12, ncol=1, columnspacing=0.3)
        leg.draw_frame(False)

        plt.savefig('{0}.{1}'.format(saveAs, self.image_format),
                    format=self.image_format, bbox_inches='tight', dpi=300)
        plt.close()

        return

    def plot_loss_history(self, history, ax=None, index=-1):
        """Draw one model's loss history on *ax*; return CSV rows (epoch,loss)."""
        loss = history.history['loss']
        x = range(1, len(loss)+1)
        label = 'Loss {0}'.format(index) if index >= 0 else 'Loss'
        ax.plot(x, loss, label=label)
        csv = ["{0},{1}".format(i, j) for i, j in zip(x, loss)]
        return csv

    def loss_history(self, history, kfold=0, val_loss=0.0):
        """Plot loss as a function of epoch for model."""
        self.msg_svc.INFO("DL : Plotting loss as a function of epoch number.")
        saveAs = "{0}/loss_epochs_{1}".format(self.output_dir, self.date)
        all_histories = type(history) == list  # list => one history per k-fold

        # draw the loss curve
        fig, ax = plt.subplots()

        # also save the data to a CSV file
        if all_histories:
            for i, h in enumerate(history):
                csv = self.plot_loss_history(h, ax=ax, index=i)
                filename = "{0}_{1}.csv".format(saveAs, i)
                util.to_csv(filename, csv)
        else:
            csv = self.plot_loss_history(history, ax=ax)
            filename = "{0}.csv".format(saveAs)
            util.to_csv(filename, csv)

        ax.set_xlabel('Epoch', fontsize=22, ha='right', va='top', position=(1, 0))
        ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()], fontsize=22)
        ax.set_ylabel('Loss', fontsize=22, ha='right', va='bottom', position=(0, 1))
        ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''], fontsize=22)

        ## CMS/COM Energy Label
        cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
        cms_stamp.coords = [0.03, 0.97]
        cms_stamp.fontsize = 18
        ax.text(cms_stamp.coords[0], cms_stamp.coords[1], cms_stamp.text, fontsize=cms_stamp.fontsize,
                ha=cms_stamp.ha, va=cms_stamp.va, transform=ax.transAxes)

        energy_stamp = hpl.EnergyStamp()
        energy_stamp.coords = [0.03, 0.90]
        energy_stamp.fontsize = 18
        ax.text(energy_stamp.coords[0], energy_stamp.coords[1], energy_stamp.text,
                fontsize=energy_stamp.fontsize, ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

        text_args = {'ha': 'left', 'va': 'top', 'fontsize': 18, 'transform': ax.transAxes}
        text = "Validation Loss = {0}; {1} K-folds".format(val_loss, len(history)) if all_histories else "Validation Loss = {0}".format(val_loss)
        ax.text(0.03, 0.76, text, **text_args)

        leg = ax.legend(loc=1, numpoints=1, fontsize=12, ncol=1, columnspacing=0.3)
        leg.draw_frame(False)

        # Strip trailing zeros/dot from x-axis tick labels
        f = lambda x, pos: str(x).rstrip('0').rstrip('.')
        ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(f))

        plt.savefig('{0}.{1}'.format(saveAs, self.image_format),
                    format=self.image_format, bbox_inches='tight', dpi=200)
        plt.close()

        return

    def model(self, model, name):
        """Plot the model architecture to view later."""
        keras_plot(model, to_file='{0}/{1}_model.eps'.format(self.output_dir, name), show_shapes=True)
        return
## THE END ##
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.pubsub_v1.types import pubsub
from .base import SubscriberTransport, DEFAULT_CLIENT_INFO
class SubscriberGrpcTransport(SubscriberTransport):
"""gRPC backend transport for Subscriber.
The service that an application uses to manipulate subscriptions and
to consume messages from a subscription via the ``Pull`` method or
by establishing a bi-directional stream using the ``StreamingPull``
method.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "pubsub.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        # Both mTLS arguments below are superseded by
        # client_cert_source_for_mtls / ssl_channel_credentials.
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # Deprecated path: an explicit mTLS endpoint replaces the host.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Current path: build mTLS credentials from the callback unless
                # explicit ssl_channel_credentials take precedence.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        # Only create a channel if one was not supplied by the caller.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                    ("grpc.keepalive_time_ms", 30000),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "pubsub.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def create_subscription(
self,
) -> Callable[[pubsub.Subscription], pubsub.Subscription]:
r"""Return a callable for the create subscription method over gRPC.
Creates a subscription to a given topic. See the [resource name
rules]
(https://cloud.google.com/pubsub/docs/admin#resource_names). If
the subscription already exists, returns ``ALREADY_EXISTS``. If
the corresponding topic doesn't exist, returns ``NOT_FOUND``.
If the name is not provided in the request, the server will
assign a random name for this subscription on the same project
as the topic, conforming to the [resource name format]
(https://cloud.google.com/pubsub/docs/admin#resource_names). The
generated name is populated in the returned Subscription object.
Note that for REST API requests, you must specify a name in the
request.
Returns:
Callable[[~.Subscription],
~.Subscription]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_subscription" not in self._stubs:
self._stubs["create_subscription"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/CreateSubscription",
request_serializer=pubsub.Subscription.serialize,
response_deserializer=pubsub.Subscription.deserialize,
)
return self._stubs["create_subscription"]
@property
def get_subscription(
self,
) -> Callable[[pubsub.GetSubscriptionRequest], pubsub.Subscription]:
r"""Return a callable for the get subscription method over gRPC.
Gets the configuration details of a subscription.
Returns:
Callable[[~.GetSubscriptionRequest],
~.Subscription]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_subscription" not in self._stubs:
self._stubs["get_subscription"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/GetSubscription",
request_serializer=pubsub.GetSubscriptionRequest.serialize,
response_deserializer=pubsub.Subscription.deserialize,
)
return self._stubs["get_subscription"]
@property
def update_subscription(
self,
) -> Callable[[pubsub.UpdateSubscriptionRequest], pubsub.Subscription]:
r"""Return a callable for the update subscription method over gRPC.
Updates an existing subscription. Note that certain
properties of a subscription, such as its topic, are not
modifiable.
Returns:
Callable[[~.UpdateSubscriptionRequest],
~.Subscription]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_subscription" not in self._stubs:
self._stubs["update_subscription"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/UpdateSubscription",
request_serializer=pubsub.UpdateSubscriptionRequest.serialize,
response_deserializer=pubsub.Subscription.deserialize,
)
return self._stubs["update_subscription"]
@property
def list_subscriptions(
self,
) -> Callable[[pubsub.ListSubscriptionsRequest], pubsub.ListSubscriptionsResponse]:
r"""Return a callable for the list subscriptions method over gRPC.
Lists matching subscriptions.
Returns:
Callable[[~.ListSubscriptionsRequest],
~.ListSubscriptionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_subscriptions" not in self._stubs:
self._stubs["list_subscriptions"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/ListSubscriptions",
request_serializer=pubsub.ListSubscriptionsRequest.serialize,
response_deserializer=pubsub.ListSubscriptionsResponse.deserialize,
)
return self._stubs["list_subscriptions"]
@property
def delete_subscription(
self,
) -> Callable[[pubsub.DeleteSubscriptionRequest], empty_pb2.Empty]:
r"""Return a callable for the delete subscription method over gRPC.
Deletes an existing subscription. All messages retained in the
subscription are immediately dropped. Calls to ``Pull`` after
deletion will return ``NOT_FOUND``. After a subscription is
deleted, a new one may be created with the same name, but the
new one has no association with the old subscription or its
topic unless the same topic is specified.
Returns:
Callable[[~.DeleteSubscriptionRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_subscription" not in self._stubs:
self._stubs["delete_subscription"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/DeleteSubscription",
request_serializer=pubsub.DeleteSubscriptionRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_subscription"]
@property
def modify_ack_deadline(
self,
) -> Callable[[pubsub.ModifyAckDeadlineRequest], empty_pb2.Empty]:
r"""Return a callable for the modify ack deadline method over gRPC.
Modifies the ack deadline for a specific message. This method is
useful to indicate that more time is needed to process a message
by the subscriber, or to make the message available for
redelivery if the processing was interrupted. Note that this
does not modify the subscription-level ``ackDeadlineSeconds``
used for subsequent messages.
Returns:
Callable[[~.ModifyAckDeadlineRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "modify_ack_deadline" not in self._stubs:
self._stubs["modify_ack_deadline"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/ModifyAckDeadline",
request_serializer=pubsub.ModifyAckDeadlineRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["modify_ack_deadline"]
@property
def acknowledge(self) -> Callable[[pubsub.AcknowledgeRequest], empty_pb2.Empty]:
r"""Return a callable for the acknowledge method over gRPC.
Acknowledges the messages associated with the ``ack_ids`` in the
``AcknowledgeRequest``. The Pub/Sub system can remove the
relevant messages from the subscription.
Acknowledging a message whose ack deadline has expired may
succeed, but such a message may be redelivered later.
Acknowledging a message more than once will not result in an
error.
Returns:
Callable[[~.AcknowledgeRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "acknowledge" not in self._stubs:
self._stubs["acknowledge"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/Acknowledge",
request_serializer=pubsub.AcknowledgeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["acknowledge"]
@property
def pull(self) -> Callable[[pubsub.PullRequest], pubsub.PullResponse]:
r"""Return a callable for the pull method over gRPC.
Pulls messages from the server. The server may return
``UNAVAILABLE`` if there are too many concurrent pull requests
pending for the given subscription.
Returns:
Callable[[~.PullRequest],
~.PullResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "pull" not in self._stubs:
self._stubs["pull"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/Pull",
request_serializer=pubsub.PullRequest.serialize,
response_deserializer=pubsub.PullResponse.deserialize,
)
return self._stubs["pull"]
@property
def streaming_pull(
self,
) -> Callable[[pubsub.StreamingPullRequest], pubsub.StreamingPullResponse]:
r"""Return a callable for the streaming pull method over gRPC.
Establishes a stream with the server, which sends messages down
to the client. The client streams acknowledgements and ack
deadline modifications back to the server. The server will close
the stream and return the status on any error. The server may
close the stream with status ``UNAVAILABLE`` to reassign
server-side resources, in which case, the client should
re-establish the stream. Flow control can be achieved by
configuring the underlying RPC channel.
Returns:
Callable[[~.StreamingPullRequest],
~.StreamingPullResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "streaming_pull" not in self._stubs:
self._stubs["streaming_pull"] = self.grpc_channel.stream_stream(
"/google.pubsub.v1.Subscriber/StreamingPull",
request_serializer=pubsub.StreamingPullRequest.serialize,
response_deserializer=pubsub.StreamingPullResponse.deserialize,
)
return self._stubs["streaming_pull"]
@property
def modify_push_config(
self,
) -> Callable[[pubsub.ModifyPushConfigRequest], empty_pb2.Empty]:
r"""Return a callable for the modify push config method over gRPC.
Modifies the ``PushConfig`` for a specified subscription.
This may be used to change a push subscription to a pull one
(signified by an empty ``PushConfig``) or vice versa, or change
the endpoint URL and other attributes of a push subscription.
Messages will accumulate for delivery continuously through the
call regardless of changes to the ``PushConfig``.
Returns:
Callable[[~.ModifyPushConfigRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "modify_push_config" not in self._stubs:
self._stubs["modify_push_config"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/ModifyPushConfig",
request_serializer=pubsub.ModifyPushConfigRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["modify_push_config"]
@property
def get_snapshot(self) -> Callable[[pubsub.GetSnapshotRequest], pubsub.Snapshot]:
r"""Return a callable for the get snapshot method over gRPC.
Gets the configuration details of a snapshot.
Snapshots are used in <a
href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow you to manage message
acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing
subscription to the state captured by a snapshot.
Returns:
Callable[[~.GetSnapshotRequest],
~.Snapshot]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_snapshot" not in self._stubs:
self._stubs["get_snapshot"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/GetSnapshot",
request_serializer=pubsub.GetSnapshotRequest.serialize,
response_deserializer=pubsub.Snapshot.deserialize,
)
return self._stubs["get_snapshot"]
@property
def list_snapshots(
self,
) -> Callable[[pubsub.ListSnapshotsRequest], pubsub.ListSnapshotsResponse]:
r"""Return a callable for the list snapshots method over gRPC.
Lists the existing snapshots. Snapshots are used in
`Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
Returns:
Callable[[~.ListSnapshotsRequest],
~.ListSnapshotsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_snapshots" not in self._stubs:
self._stubs["list_snapshots"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/ListSnapshots",
request_serializer=pubsub.ListSnapshotsRequest.serialize,
response_deserializer=pubsub.ListSnapshotsResponse.deserialize,
)
return self._stubs["list_snapshots"]
@property
def create_snapshot(
self,
) -> Callable[[pubsub.CreateSnapshotRequest], pubsub.Snapshot]:
r"""Return a callable for the create snapshot method over gRPC.
Creates a snapshot from the requested subscription. Snapshots
are used in
`Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
If the snapshot already exists, returns ``ALREADY_EXISTS``. If
the requested subscription doesn't exist, returns ``NOT_FOUND``.
If the backlog in the subscription is too old -- and the
resulting snapshot would expire in less than 1 hour -- then
``FAILED_PRECONDITION`` is returned. See also the
``Snapshot.expire_time`` field. If the name is not provided in
the request, the server will assign a random name for this
snapshot on the same project as the subscription, conforming to
the [resource name format]
(https://cloud.google.com/pubsub/docs/admin#resource_names). The
generated name is populated in the returned Snapshot object.
Note that for REST API requests, you must specify a name in the
request.
Returns:
Callable[[~.CreateSnapshotRequest],
~.Snapshot]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_snapshot" not in self._stubs:
self._stubs["create_snapshot"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/CreateSnapshot",
request_serializer=pubsub.CreateSnapshotRequest.serialize,
response_deserializer=pubsub.Snapshot.deserialize,
)
return self._stubs["create_snapshot"]
@property
def update_snapshot(
self,
) -> Callable[[pubsub.UpdateSnapshotRequest], pubsub.Snapshot]:
r"""Return a callable for the update snapshot method over gRPC.
Updates an existing snapshot. Snapshots are used in
<a
href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is,
you can set the acknowledgment state of messages in an
existing subscription to the state captured by a
snapshot.
Returns:
Callable[[~.UpdateSnapshotRequest],
~.Snapshot]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_snapshot" not in self._stubs:
self._stubs["update_snapshot"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/UpdateSnapshot",
request_serializer=pubsub.UpdateSnapshotRequest.serialize,
response_deserializer=pubsub.Snapshot.deserialize,
)
return self._stubs["update_snapshot"]
@property
def delete_snapshot(
self,
) -> Callable[[pubsub.DeleteSnapshotRequest], empty_pb2.Empty]:
r"""Return a callable for the delete snapshot method over gRPC.
Removes an existing snapshot. Snapshots are used in [Seek]
(https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
When the snapshot is deleted, all messages retained in the
snapshot are immediately dropped. After a snapshot is deleted, a
new one may be created with the same name, but the new one has
no association with the old snapshot or its subscription, unless
the same subscription is specified.
Returns:
Callable[[~.DeleteSnapshotRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_snapshot" not in self._stubs:
self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/DeleteSnapshot",
request_serializer=pubsub.DeleteSnapshotRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_snapshot"]
@property
def seek(self) -> Callable[[pubsub.SeekRequest], pubsub.SeekResponse]:
r"""Return a callable for the seek method over gRPC.
Seeks an existing subscription to a point in time or to a given
snapshot, whichever is provided in the request. Snapshots are
used in [Seek]
(https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
Note that both the subscription and the snapshot must be on the
same topic.
Returns:
Callable[[~.SeekRequest],
~.SeekResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "seek" not in self._stubs:
self._stubs["seek"] = self.grpc_channel.unary_unary(
"/google.pubsub.v1.Subscriber/Seek",
request_serializer=pubsub.SeekRequest.serialize,
response_deserializer=pubsub.SeekResponse.deserialize,
)
return self._stubs["seek"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests the specified permissions against the IAM access control
policy for a function. If the function does not exist, this will
return an empty set of permissions, not a NOT_FOUND error.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
# Public API of this module.
__all__ = ("SubscriberGrpcTransport",)
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from copy import copy
import requests
MOPIDY_API = '/mopidy/rpc'  # JSON-RPC endpoint path on the Mopidy server
# Skeleton JSON-RPC 2.0 payload; shallow-copied and filled in per request.
_base_dict = {'jsonrpc': '2.0', 'id': 1, 'params': {}}
class Mopidy(object):
    """Minimal JSON-RPC 2.0 client for a Mopidy music server.

    Wraps the parts of the Mopidy core API used for library search and
    browsing, tracklist management, playback transport control and volume
    ducking.  Every method performs a blocking HTTP POST against the
    server's ``/mopidy/rpc`` endpoint.

    Fixed: the original body was Python 2 (``print`` statements, the
    ``unicode`` type) inside a Python 3 codebase and would not even parse;
    the repeated request boilerplate is now factored into ``_rpc``.
    """

    def __init__(self, url):
        """Create a client for the Mopidy server at *url* and clear its tracklist."""
        print("MOPIDY URL: " + url)
        self.is_playing = False
        self.url = url + MOPIDY_API
        self.volume = None
        self.clear_list(force=True)
        self.volume_low = 3     # volume while ducked (e.g. during speech)
        self.volume_high = 100  # normal playback volume

    def _rpc(self, method, params=None):
        """POST a JSON-RPC request for *method* and return the raw response.

        :param method: Mopidy core API method, e.g. ``core.playback.play``
        :param params: optional dict of parameters for the call
        """
        payload = copy(_base_dict)
        payload['method'] = method
        if params is not None:
            payload['params'] = params
        return requests.post(self.url, data=json.dumps(payload))

    def find_artist(self, artist):
        """Search the library for *artist*; return the matched artist list."""
        r = self._rpc('core.library.search', {'artist': [artist]})
        return r.json()['result'][1]['artists']

    def get_playlists(self, filter=None):
        """Return playlists, optionally only those whose URI scheme is *filter*.

        NOTE: the parameter name shadows the ``filter`` builtin; it is kept
        for backward compatibility with existing callers.
        """
        print("GETTING PLAYLISTS")
        r = self._rpc('core.playlists.as_list')
        playlists = r.json()['result']
        if filter is None:
            return playlists
        return [pl for pl in playlists if filter + ':' in pl['uri']]

    def find_album(self, album, filter=None):
        """Search the library for *album*, optionally filtered by URI scheme."""
        r = self._rpc('core.library.search', {'album': [album]})
        hits = [res['albums'] for res in r.json()['result'] if 'albums' in res]
        if filter is None:
            return hits
        return [a for sub in hits for a in sub if filter + ':' in a['uri']]

    def find_exact(self, uris='null'):
        """Exact-match library lookup for *uris*; return the full JSON reply."""
        return self._rpc('core.library.find_exact', {'uris': uris}).json()

    def browse(self, uri):
        """Browse the directory at *uri*; return its entries, or None."""
        print("BROWSE")
        r = self._rpc('core.library.browse', {'uri': uri})
        return r.json().get('result')

    def clear_list(self, force=False):
        """Clear the tracklist if playing (or unconditionally when *force*)."""
        if self.is_playing or force:
            return self._rpc('core.tracklist.clear')

    def add_list(self, uri):
        """Add a track URI (str) or a list of URIs to the tracklist."""
        if isinstance(uri, str):
            params = {'uri': uri}
        elif isinstance(uri, list):
            params = {'uris': uri}
        else:
            return None
        return self._rpc('core.tracklist.add', params)

    def play(self):
        """Start playback at normal volume."""
        self.is_playing = True
        self.restore_volume()
        self._rpc('core.playback.play')

    def next(self):
        """Skip to the next track (no-op when stopped)."""
        if self.is_playing:
            self._rpc('core.playback.next')

    def previous(self):
        """Return to the previous track (no-op when stopped)."""
        if self.is_playing:
            self._rpc('core.playback.previous')

    def stop(self):
        """Stop playback (no-op when already stopped)."""
        print(self.is_playing)
        if self.is_playing:
            self._rpc('core.playback.stop')
            self.is_playing = False

    def currently_playing(self):
        """Return the currently playing track, or None when stopped."""
        if self.is_playing:
            r = self._rpc('core.playback.get_current_track')
            return r.json()['result']
        return None

    def set_volume(self, percent):
        """Set the mixer volume to *percent* (no-op when stopped)."""
        if self.is_playing:
            self._rpc('core.mixer.set_volume', {'volume': percent})

    def lower_volume(self):
        """Duck the volume (e.g. while speech output is active)."""
        self.set_volume(self.volume_low)

    def restore_volume(self):
        """Restore the normal playback volume."""
        self.set_volume(self.volume_high)

    def pause(self):
        """Pause playback (no-op when stopped)."""
        if self.is_playing:
            self._rpc('core.playback.pause')

    def resume(self):
        """Resume paused playback (no-op when stopped)."""
        if self.is_playing:
            self._rpc('core.playback.resume')

    def get_items(self, uri):
        """Return the track URIs of the playlist at *uri*, or None."""
        r = self._rpc('core.playlists.get_items', {'uri': uri})
        data = r.json()
        if 'result' in data:
            print(data)
            return [e['uri'] for e in data['result']]
        return None

    def get_tracks(self, uri):
        """Recursively collect all track URIs below the directory *uri*."""
        entries = self.browse(uri)
        tracks = [t['uri'] for t in entries if t['type'] == 'track']
        for sub_uri in [t['uri'] for t in entries if t['type'] != 'track']:
            tracks += self.get_tracks(sub_uri)
        return tracks

    def get_local_albums(self):
        """Map album name -> entry for albums in the local library."""
        p = self.browse('local:directory?type=album')
        return {e['name']: e for e in p if e['type'] == 'album'}

    def get_local_artists(self):
        """Map artist name -> entry for artists in the local library."""
        p = self.browse('local:directory?type=artist')
        return {e['name']: e for e in p if e['type'] == 'artist'}

    def get_local_genres(self):
        """Map genre name -> entry for genres in the local library."""
        p = self.browse('local:directory?type=genre')
        return {e['name']: e for e in p if e['type'] == 'directory'}

    def get_local_playlists(self):
        """Map playlist name -> entry for local (m3u) playlists."""
        print("GETTING PLAYLISTS")
        p = self.get_playlists('m3u')
        print("RETURNING PLAYLISTS")
        return {e['name']: e for e in p}

    def get_spotify_playlists(self):
        """Map lower-cased playlist name -> entry for Spotify playlists."""
        p = self.get_playlists('spotify')
        return {e['name'].split('(by')[0].strip().lower(): e for e in p}

    def get_gmusic_albums(self):
        """Map album title -> entry for Google Music albums.

        Entry names look like ``"<artist> - <album>"``; the artist prefix is
        stripped.  Assumes every name contains ``' - '`` — TODO confirm.
        """
        p = self.browse('gmusic:album')
        print(p)
        p = {e['name']: e for e in p if e['type'] == 'directory'}
        print(p)
        return {e.split(' - ')[1]: p[e] for e in p}

    def get_gmusic_artists(self):
        """Map artist name -> entry for Google Music artists."""
        p = self.browse('gmusic:artist')
        return {e['name']: e for e in p if e['type'] == 'directory'}

    def get_gmusic_radio(self):
        """Map station name -> entry for Google Music radio stations."""
        p = self.browse('gmusic:radio')
        return {e['name']: e for e in p if e['type'] == 'directory'}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import common as rpc_common
from neutron.openstack.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
"""A helper class for rpc clients.
This class is a wrapper around the RPC client API. It allows you to
specify the topic and API version in a single place. This is intended to
be used as a base class for a class that implements the client side of an
rpc API.
"""
    # The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy.
:param topic: The topic to use for all messages.
:param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
:param serializer: Optionaly (de-)serialize entities with a
provided helper.
"""
self.topic = topic
self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__()
def _set_version(self, msg, vers):
"""Helper method to set the version in a message.
:param msg: The message having a version added to it.
:param vers: The version number to add to the message.
"""
v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version=self.version_cap)
msg['version'] = v
def _get_topic(self, topic):
"""Return the topic to use for a message."""
return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
@staticmethod
def make_namespaced_msg(method, namespace, **kwargs):
return {'method': method, 'namespace': namespace, 'args': kwargs}
    def make_msg(self, method, **kwargs):
        """Build a message for *method* under this class's default namespace."""
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: The return value from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def multicall(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.multicall() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: An iterator that lets you process each of the returned values
from the remote method as they arrive.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def cast(self, context, msg, topic=None, version=None):
"""rpc.cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast() does not wait on any return value from the
remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None):
"""rpc.fanout_cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast() does not wait on any return value
from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
def fanout_cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.fanout_cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast_to_server(context, server_params,
self._get_topic(topic), msg)
|
|
"""Support for Google Actions Smart Home Control."""
import asyncio
from datetime import timedelta
import logging
from uuid import uuid4
from aiohttp import ClientError, ClientResponseError
from aiohttp.web import Request, Response
import jwt
# Typing imports
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import dt as dt_util
from .const import (
CONF_API_KEY,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_REPORT_STATE,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
GOOGLE_ASSISTANT_API_ENDPOINT,
HOMEGRAPH_SCOPE,
HOMEGRAPH_TOKEN_URL,
REPORT_STATE_BASE_URL,
REQUEST_SYNC_BASE_URL,
SOURCE_CLOUD,
)
from .helpers import AbstractConfig
from .smart_home import async_handle_message
_LOGGER = logging.getLogger(__name__)
def _get_homegraph_jwt(time, iss, key):
    """Build a signed JWT assertion for the HomeGraph token endpoint."""
    issued_at = int(time.timestamp())
    claims = {
        "iss": iss,
        "scope": HOMEGRAPH_SCOPE,
        "aud": HOMEGRAPH_TOKEN_URL,
        "iat": issued_at,
        # Token assertion is valid for one hour.
        "exp": issued_at + 3600,
    }
    return jwt.encode(claims, key, algorithm="RS256").decode("utf-8")
async def _get_homegraph_token(hass, jwt_signed):
    """Exchange a signed JWT assertion for a HomeGraph access token."""
    request_headers = {
        "Authorization": "Bearer {}".format(jwt_signed),
        "Content-Type": "application/x-www-form-urlencoded",
    }
    payload = {
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "assertion": jwt_signed,
    }
    session = async_get_clientsession(hass)
    async with session.post(
        HOMEGRAPH_TOKEN_URL, headers=request_headers, data=payload
    ) as res:
        res.raise_for_status()
        return await res.json()
class GoogleConfig(AbstractConfig):
    """Config for manual setup of Google."""
    def __init__(self, hass, config):
        """Initialize the config."""
        super().__init__(hass)
        self._config = config
        # Cached HomeGraph OAuth access token and its renewal deadline;
        # both are populated lazily by _async_update_token().
        self._access_token = None
        self._access_token_renew = None
    @property
    def enabled(self):
        """Return if Google is enabled."""
        return True
    @property
    def entity_config(self):
        """Return entity config."""
        return self._config.get(CONF_ENTITY_CONFIG) or {}
    @property
    def secure_devices_pin(self):
        """Return the pin used to confirm actions on secure devices."""
        return self._config.get(CONF_SECURE_DEVICES_PIN)
    @property
    def should_report_state(self):
        """Return if states should be proactively reported."""
        return self._config.get(CONF_REPORT_STATE)
    def should_expose(self, state) -> bool:
        """Return if entity should be exposed."""
        expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)
        exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)
        if state.attributes.get("view") is not None:
            # Ignore entities that are views
            return False
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            return False
        # Per-entity override from the entity config; None if unset.
        explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)
        domain_exposed_by_default = (
            expose_by_default and state.domain in exposed_domains
        )
        # Expose an entity if the entity's domain is exposed by default and
        # the configuration doesn't explicitly exclude it from being
        # exposed, or if the entity is explicitly exposed
        is_default_exposed = domain_exposed_by_default and explicit_expose is not False
        # NOTE(review): despite the -> bool annotation this can return None
        # (is_default_exposed falsy and explicit_expose unset); looks like it
        # is only used in boolean context -- confirm before tightening.
        return is_default_exposed or explicit_expose
    def get_agent_user_id(self, context):
        """Get agent user ID making request."""
        return context.user_id
    def should_2fa(self, state):
        """If an entity should have 2FA checked."""
        return True
    async def _async_request_sync_devices(self, agent_user_id: str):
        # Prefer API-key authentication when configured, otherwise fall back
        # to the service account; log an error if neither is available.
        if CONF_API_KEY in self._config:
            await self.async_call_homegraph_api_key(
                REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
            )
        elif CONF_SERVICE_ACCOUNT in self._config:
            await self.async_call_homegraph_api(
                REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
            )
        else:
            _LOGGER.error("No configuration for request_sync available")
    async def _async_update_token(self, force=False):
        # Fetch or refresh the HomeGraph access token with the service
        # account credentials; no-op while the cached token is still
        # valid, unless force is True.
        if CONF_SERVICE_ACCOUNT not in self._config:
            _LOGGER.error("Trying to get homegraph api token without service account")
            return
        now = dt_util.utcnow()
        if not self._access_token or now > self._access_token_renew or force:
            token = await _get_homegraph_token(
                self.hass,
                _get_homegraph_jwt(
                    now,
                    self._config[CONF_SERVICE_ACCOUNT][CONF_CLIENT_EMAIL],
                    self._config[CONF_SERVICE_ACCOUNT][CONF_PRIVATE_KEY],
                ),
            )
            self._access_token = token["access_token"]
            # Renew when the token's advertised lifetime elapses.
            self._access_token_renew = now + timedelta(seconds=token["expires_in"])
    async def async_call_homegraph_api_key(self, url, data):
        """Call a homegraph api with api key authentication."""
        websession = async_get_clientsession(self.hass)
        try:
            res = await websession.post(
                url, params={"key": self._config.get(CONF_API_KEY)}, json=data
            )
            _LOGGER.debug(
                "Response on %s with data %s was %s", url, data, await res.text()
            )
            res.raise_for_status()
            # Return the HTTP status code so callers can inspect the result.
            return res.status
        except ClientResponseError as error:
            _LOGGER.error("Request for %s failed: %d", url, error.status)
            return error.status
        except (asyncio.TimeoutError, ClientError):
            # Network-level failure; surface it as a generic 500.
            _LOGGER.error("Could not contact %s", url)
            return 500
    async def async_call_homegraph_api(self, url, data):
        """Call a homegraph api with authentication."""
        session = async_get_clientsession(self.hass)
        async def _call():
            # One authenticated POST using the currently cached token.
            headers = {
                "Authorization": "Bearer {}".format(self._access_token),
                "X-GFE-SSL": "yes",
            }
            async with session.post(url, headers=headers, json=data) as res:
                _LOGGER.debug(
                    "Response on %s with data %s was %s", url, data, await res.text()
                )
                res.raise_for_status()
                return res.status
        try:
            await self._async_update_token()
            try:
                return await _call()
            except ClientResponseError as error:
                if error.status == 401:
                    # Token may have been revoked or expired early: force a
                    # refresh and retry the call exactly once.
                    _LOGGER.warning(
                        "Request for %s unauthorized, renewing token and retrying", url
                    )
                    await self._async_update_token(True)
                    return await _call()
                raise
        except ClientResponseError as error:
            _LOGGER.error("Request for %s failed: %d", url, error.status)
            return error.status
        except (asyncio.TimeoutError, ClientError):
            _LOGGER.error("Could not contact %s", url)
            return 500
    async def async_report_state(self, message, agent_user_id: str):
        """Send a state report to Google."""
        data = {
            "requestId": uuid4().hex,
            "agentUserId": agent_user_id,
            "payload": message,
        }
        await self.async_call_homegraph_api(REPORT_STATE_BASE_URL, data)
class GoogleAssistantView(HomeAssistantView):
    """HTTP endpoint that receives Google Assistant smart-home requests."""

    url = GOOGLE_ASSISTANT_API_ENDPOINT
    name = "api:google_assistant"
    requires_auth = True

    def __init__(self, config):
        """Store the Google Assistant configuration for request handling."""
        self.config = config

    async def post(self, request: Request) -> Response:
        """Dispatch an incoming Google Assistant request to the handler."""
        message: dict = await request.json()
        hass = request.app["hass"]
        user_id = request["hass_user"].id
        result = await async_handle_message(
            hass,
            self.config,
            user_id,
            message,
            SOURCE_CLOUD,
        )
        return self.json(result)
|
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from keystone.common import sql
from keystone.contrib.federation import core
from keystone import exception
class FederationProtocolModel(sql.ModelBase, sql.DictBase):
    """ORM model for a federation protocol attached to an identity provider."""

    __tablename__ = 'federation_protocol'
    attributes = ['id', 'idp_id', 'mapping_id']
    mutable_attributes = frozenset(['mapping_id'])
    id = sql.Column(sql.String(64), primary_key=True)
    idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
                        ondelete='CASCADE'), primary_key=True)
    mapping_id = sql.Column(sql.String(64), nullable=False)

    @classmethod
    def from_dict(cls, dictionary):
        """Build a model instance from a plain dictionary."""
        return cls(**dictionary.copy())

    def to_dict(self):
        """Return a dictionary with the model's attributes."""
        return {attr: getattr(self, attr) for attr in self.__class__.attributes}
class IdentityProviderModel(sql.ModelBase, sql.DictBase):
    """ORM model for a federated identity provider."""

    __tablename__ = 'identity_provider'
    attributes = ['id', 'remote_id', 'enabled', 'description']
    mutable_attributes = frozenset(['description', 'enabled', 'remote_id'])
    id = sql.Column(sql.String(64), primary_key=True)
    remote_id = sql.Column(sql.String(256), nullable=True)
    enabled = sql.Column(sql.Boolean, nullable=False)
    description = sql.Column(sql.Text(), nullable=True)

    @classmethod
    def from_dict(cls, dictionary):
        """Build a model instance from a plain dictionary."""
        return cls(**dictionary.copy())

    def to_dict(self):
        """Return a dictionary with the model's attributes."""
        return {attr: getattr(self, attr) for attr in self.__class__.attributes}
class MappingModel(sql.ModelBase, sql.DictBase):
    """ORM model for an attribute mapping (rules stored as a JSON blob)."""

    __tablename__ = 'mapping'
    attributes = ['id', 'rules']
    id = sql.Column(sql.String(64), primary_key=True)
    rules = sql.Column(sql.JsonBlob(), nullable=False)

    @classmethod
    def from_dict(cls, dictionary):
        """Build a model instance from a plain dictionary."""
        return cls(**dictionary.copy())

    def to_dict(self):
        """Return a dictionary with the model's attributes."""
        return {attr: getattr(self, attr) for attr in self.__class__.attributes}
class ServiceProviderModel(sql.ModelBase, sql.DictBase):
    """ORM model for a federated service provider."""

    __tablename__ = 'service_provider'
    attributes = ['auth_url', 'id', 'enabled', 'description', 'sp_url']
    mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
                                    'sp_url'])
    id = sql.Column(sql.String(64), primary_key=True)
    enabled = sql.Column(sql.Boolean, nullable=False)
    description = sql.Column(sql.Text(), nullable=True)
    auth_url = sql.Column(sql.String(256), nullable=True)
    sp_url = sql.Column(sql.String(256), nullable=True)

    @classmethod
    def from_dict(cls, dictionary):
        """Build a model instance from a plain dictionary."""
        return cls(**dictionary.copy())

    def to_dict(self):
        """Return a dictionary with the model's attributes."""
        return {attr: getattr(self, attr) for attr in self.__class__.attributes}
class Federation(core.Driver):
    """SQL driver for the federation extension.

    Provides CRUD for identity providers, protocols, attribute mappings
    and service providers on top of keystone's SQL layer.
    """
    # Identity Provider CRUD
    @sql.handle_conflicts(conflict_type='identity_provider')
    def create_idp(self, idp_id, idp):
        # NOTE: mutates the caller's dict by injecting the id.
        idp['id'] = idp_id
        with sql.transaction() as session:
            idp_ref = IdentityProviderModel.from_dict(idp)
            session.add(idp_ref)
            return idp_ref.to_dict()
    def delete_idp(self, idp_id):
        # Protocol rows reference the IdP with ondelete='CASCADE', so the
        # database removes them together with the IdP row.
        with sql.transaction() as session:
            idp_ref = self._get_idp(session, idp_id)
            session.delete(idp_ref)
    def _get_idp(self, session, idp_id):
        # Fetch an IdP row or raise IdentityProviderNotFound.
        idp_ref = session.query(IdentityProviderModel).get(idp_id)
        if not idp_ref:
            raise exception.IdentityProviderNotFound(idp_id=idp_id)
        return idp_ref
    def list_idps(self):
        """Return all identity providers as plain dicts."""
        with sql.transaction() as session:
            idps = session.query(IdentityProviderModel)
            idps_list = [idp.to_dict() for idp in idps]
            return idps_list
    def get_idp(self, idp_id):
        """Return a single identity provider as a plain dict."""
        with sql.transaction() as session:
            idp_ref = self._get_idp(session, idp_id)
            return idp_ref.to_dict()
    def update_idp(self, idp_id, idp):
        # Merge the update onto the stored record, then copy back only the
        # attributes declared mutable on the model.
        with sql.transaction() as session:
            idp_ref = self._get_idp(session, idp_id)
            old_idp = idp_ref.to_dict()
            old_idp.update(idp)
            new_idp = IdentityProviderModel.from_dict(old_idp)
            for attr in IdentityProviderModel.mutable_attributes:
                setattr(idp_ref, attr, getattr(new_idp, attr))
            return idp_ref.to_dict()
    # Protocol CRUD
    def _get_protocol(self, session, idp_id, protocol_id):
        # Fetch a protocol row for the given IdP or raise
        # FederatedProtocolNotFound.
        q = session.query(FederationProtocolModel)
        q = q.filter_by(id=protocol_id, idp_id=idp_id)
        try:
            return q.one()
        except sql.NotFound:
            kwargs = {'protocol_id': protocol_id,
                      'idp_id': idp_id}
            raise exception.FederatedProtocolNotFound(**kwargs)
    @sql.handle_conflicts(conflict_type='federation_protocol')
    def create_protocol(self, idp_id, protocol_id, protocol):
        # NOTE: mutates the caller's dict by injecting both ids.
        protocol['id'] = protocol_id
        protocol['idp_id'] = idp_id
        with sql.transaction() as session:
            # Validates the IdP exists (raises otherwise) before adding.
            self._get_idp(session, idp_id)
            protocol_ref = FederationProtocolModel.from_dict(protocol)
            session.add(protocol_ref)
            return protocol_ref.to_dict()
    def update_protocol(self, idp_id, protocol_id, protocol):
        # Merge the update, then copy back only mutable attributes.
        with sql.transaction() as session:
            proto_ref = self._get_protocol(session, idp_id, protocol_id)
            old_proto = proto_ref.to_dict()
            old_proto.update(protocol)
            new_proto = FederationProtocolModel.from_dict(old_proto)
            for attr in FederationProtocolModel.mutable_attributes:
                setattr(proto_ref, attr, getattr(new_proto, attr))
            return proto_ref.to_dict()
    def get_protocol(self, idp_id, protocol_id):
        """Return a single protocol of an IdP as a plain dict."""
        with sql.transaction() as session:
            protocol_ref = self._get_protocol(session, idp_id, protocol_id)
            return protocol_ref.to_dict()
    def list_protocols(self, idp_id):
        """Return all protocols of an IdP as plain dicts."""
        with sql.transaction() as session:
            q = session.query(FederationProtocolModel)
            q = q.filter_by(idp_id=idp_id)
            protocols = [protocol.to_dict() for protocol in q]
            return protocols
    def delete_protocol(self, idp_id, protocol_id):
        """Delete a single protocol of an IdP."""
        with sql.transaction() as session:
            key_ref = self._get_protocol(session, idp_id, protocol_id)
            session.delete(key_ref)
    # Mapping CRUD
    def _get_mapping(self, session, mapping_id):
        # Fetch a mapping row or raise MappingNotFound.
        mapping_ref = session.query(MappingModel).get(mapping_id)
        if not mapping_ref:
            raise exception.MappingNotFound(mapping_id=mapping_id)
        return mapping_ref
    @sql.handle_conflicts(conflict_type='mapping')
    def create_mapping(self, mapping_id, mapping):
        # Rules are stored as a serialized JSON blob.
        ref = {}
        ref['id'] = mapping_id
        ref['rules'] = jsonutils.dumps(mapping.get('rules'))
        with sql.transaction() as session:
            mapping_ref = MappingModel.from_dict(ref)
            session.add(mapping_ref)
            return mapping_ref.to_dict()
    def delete_mapping(self, mapping_id):
        """Delete a single mapping."""
        with sql.transaction() as session:
            mapping_ref = self._get_mapping(session, mapping_id)
            session.delete(mapping_ref)
    def list_mappings(self):
        """Return all mappings as plain dicts."""
        with sql.transaction() as session:
            mappings = session.query(MappingModel)
            return [x.to_dict() for x in mappings]
    def get_mapping(self, mapping_id):
        """Return a single mapping as a plain dict."""
        with sql.transaction() as session:
            mapping_ref = self._get_mapping(session, mapping_id)
            return mapping_ref.to_dict()
    @sql.handle_conflicts(conflict_type='mapping')
    def update_mapping(self, mapping_id, mapping):
        # Re-serialize the rules and merge onto the stored record.
        ref = {}
        ref['id'] = mapping_id
        ref['rules'] = jsonutils.dumps(mapping.get('rules'))
        with sql.transaction() as session:
            mapping_ref = self._get_mapping(session, mapping_id)
            old_mapping = mapping_ref.to_dict()
            old_mapping.update(ref)
            new_mapping = MappingModel.from_dict(old_mapping)
            for attr in MappingModel.attributes:
                setattr(mapping_ref, attr, getattr(new_mapping, attr))
            return mapping_ref.to_dict()
    def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
        """Return the mapping referenced by the given IdP/protocol pair."""
        with sql.transaction() as session:
            protocol_ref = self._get_protocol(session, idp_id, protocol_id)
            mapping_id = protocol_ref.mapping_id
            mapping_ref = self._get_mapping(session, mapping_id)
            return mapping_ref.to_dict()
    # Service Provider CRUD
    @sql.handle_conflicts(conflict_type='service_provider')
    def create_sp(self, sp_id, sp):
        # NOTE: mutates the caller's dict by injecting the id.
        sp['id'] = sp_id
        with sql.transaction() as session:
            sp_ref = ServiceProviderModel.from_dict(sp)
            session.add(sp_ref)
            return sp_ref.to_dict()
    def delete_sp(self, sp_id):
        """Delete a single service provider."""
        with sql.transaction() as session:
            sp_ref = self._get_sp(session, sp_id)
            session.delete(sp_ref)
    def _get_sp(self, session, sp_id):
        # Fetch a service provider row or raise ServiceProviderNotFound.
        sp_ref = session.query(ServiceProviderModel).get(sp_id)
        if not sp_ref:
            raise exception.ServiceProviderNotFound(sp_id=sp_id)
        return sp_ref
    def list_sps(self):
        """Return all service providers as plain dicts."""
        with sql.transaction() as session:
            sps = session.query(ServiceProviderModel)
            sps_list = [sp.to_dict() for sp in sps]
            return sps_list
    def get_sp(self, sp_id):
        """Return a single service provider as a plain dict."""
        with sql.transaction() as session:
            sp_ref = self._get_sp(session, sp_id)
            return sp_ref.to_dict()
    def update_sp(self, sp_id, sp):
        # Merge the update, then copy back only mutable attributes.
        with sql.transaction() as session:
            sp_ref = self._get_sp(session, sp_id)
            old_sp = sp_ref.to_dict()
            old_sp.update(sp)
            new_sp = ServiceProviderModel.from_dict(old_sp)
            for attr in ServiceProviderModel.mutable_attributes:
                setattr(sp_ref, attr, getattr(new_sp, attr))
            return sp_ref.to_dict()
|
|
import copy
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.migrations.utils import field_is_referenced, get_references
from django.db.models import NOT_PROVIDED
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
from .utils import resolve_relation
def _get_app_label_and_model_name(model, app_label=''):
    """Return an (app_label, model_name) pair for a model reference.

    ``model`` may be a model class or a string, either "app_label.Model"
    or a bare model name (in which case ``app_label`` is used).
    """
    if not isinstance(model, str):
        return model._meta.app_label, model._meta.model_name
    parts = model.split('.', 1)
    if len(parts) == 2:
        return tuple(parts)
    return app_label, parts[0]
def _get_related_models(m):
    """Return all models that have a direct relationship to the given model."""
    # Subclasses are related via the implicit parent link.
    related = [
        candidate for candidate in m.__subclasses__()
        if issubclass(candidate, models.Model)
    ]
    models_with_relations = set()
    for field in m._meta.get_fields(include_parents=True, include_hidden=True):
        if (
            field.is_relation
            and field.related_model is not None
            and not isinstance(field.related_model, str)
        ):
            models_with_relations.add(field.model)
            related.append(field.related_model)
    # Reverse accessors of foreign keys to proxy models are attached to their
    # concrete proxied model.
    meta = m._meta
    if meta.proxy and m in models_with_relations:
        related.append(meta.concrete_model)
    return related
def get_related_models_tuples(model):
    """
    Return a set of (app_label, model_name) tuples for all models directly
    related to the given model.
    """
    result = set()
    for related in _get_related_models(model):
        result.add((related._meta.app_label, related._meta.model_name))
    return result
def get_related_models_recursive(model):
    """
    Return all models that have a direct or indirect relationship
    to the given model, as (app_label, model_name) tuples.

    Relationships are either explicit relational fields (ForeignKey,
    ManyToManyField, OneToOneField) or inheritance: a superclass is related
    to its subclasses, but not vice versa. A model inheriting from a
    concrete model is nevertheless related to its superclass through the
    implicit *_ptr OneToOneField on the subclass.
    """
    seen = set()
    # Breadth-first walk; the queue grows as new related models appear.
    queue = _get_related_models(model)
    for candidate in queue:
        key = (candidate._meta.app_label, candidate._meta.model_name)
        if key not in seen:
            seen.add(key)
            queue.extend(_get_related_models(candidate))
    # The model itself is never part of its own related set.
    return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState:
"""
Represent the entire project's overall state. This is the item that is
passed around - do it here rather than at the app level so that cross-app
FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
if real_apps is None:
real_apps = set()
else:
assert isinstance(real_apps, set)
self.real_apps = real_apps
self.is_delayed = False
# {remote_model_key: {model_key: [(field_name, field)]}}
self.relations = None
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
    def rename_model(self, app_label, old_name, new_name):
        """Rename a model state and repoint every field that references it."""
        # Add a new model.
        old_name_lower = old_name.lower()
        new_name_lower = new_name.lower()
        renamed_model = self.models[app_label, old_name_lower].clone()
        renamed_model.name = new_name
        self.models[app_label, new_name_lower] = renamed_model
        # Repoint all fields pointing to the old model to the new one.
        old_model_tuple = (app_label, old_name_lower)
        new_remote_model = f'{app_label}.{new_name}'
        to_reload = set()
        for model_state, name, field, reference in get_references(self, old_model_tuple):
            changed_field = None
            # A reference may point at the old model directly (reference.to)
            # and/or use it as an M2M through model (reference.through);
            # clone the field at most once and fix both pointers on it.
            if reference.to:
                changed_field = field.clone()
                changed_field.remote_field.model = new_remote_model
            if reference.through:
                if changed_field is None:
                    changed_field = field.clone()
                changed_field.remote_field.through = new_remote_model
            if changed_field:
                model_state.fields[name] = changed_field
                to_reload.add((model_state.app_label, model_state.name_lower))
        # Reload models related to old model before removing the old model.
        self.reload_models(to_reload, delay=True)
        # Remove the old model.
        self.remove_model(app_label, old_name_lower)
        self.reload_model(app_label, new_name_lower, delay=True)
def alter_model_options(self, app_label, model_name, options, option_keys=None):
model_state = self.models[app_label, model_name]
model_state.options = {**model_state.options, **options}
if option_keys:
for key in option_keys:
if key not in options:
model_state.options.pop(key, False)
self.reload_model(app_label, model_name, delay=True)
def alter_model_managers(self, app_label, model_name, managers):
model_state = self.models[app_label, model_name]
model_state.managers = list(managers)
self.reload_model(app_label, model_name, delay=True)
def _append_option(self, app_label, model_name, option_name, obj):
model_state = self.models[app_label, model_name]
model_state.options[option_name] = [*model_state.options[option_name], obj]
self.reload_model(app_label, model_name, delay=True)
def _remove_option(self, app_label, model_name, option_name, obj_name):
model_state = self.models[app_label, model_name]
objs = model_state.options[option_name]
model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name]
self.reload_model(app_label, model_name, delay=True)
    def add_index(self, app_label, model_name, index):
        """Add *index* to the model's ``indexes`` option."""
        self._append_option(app_label, model_name, 'indexes', index)
    def remove_index(self, app_label, model_name, index_name):
        """Remove the index named *index_name* from the model's ``indexes``."""
        self._remove_option(app_label, model_name, 'indexes', index_name)
    def add_constraint(self, app_label, model_name, constraint):
        """Add *constraint* to the model's ``constraints`` option."""
        self._append_option(app_label, model_name, 'constraints', constraint)
    def remove_constraint(self, app_label, model_name, constraint_name):
        """Remove the constraint named *constraint_name* from the model."""
        self._remove_option(app_label, model_name, 'constraints', constraint_name)
def add_field(self, app_label, model_name, name, field, preserve_default):
# If preserve default is off, don't use the default for future state.
if not preserve_default:
field = field.clone()
field.default = NOT_PROVIDED
else:
field = field
self.models[app_label, model_name].fields[name] = field
# Delay rendering of relationships if it's not a relational field.
delay = not field.is_relation
self.reload_model(app_label, model_name, delay=delay)
def remove_field(self, app_label, model_name, name):
model_state = self.models[app_label, model_name]
old_field = model_state.fields.pop(name)
# Delay rendering of relationships if it's not a relational field.
delay = not old_field.is_relation
self.reload_model(app_label, model_name, delay=delay)
def alter_field(self, app_label, model_name, name, field, preserve_default):
if not preserve_default:
field = field.clone()
field.default = NOT_PROVIDED
else:
field = field
model_state = self.models[app_label, model_name]
model_state.fields[name] = field
# TODO: investigate if old relational fields must be reloaded or if
# it's sufficient if the new field is (#27737).
# Delay rendering of relationships if it's not a relational field and
# not referenced by a foreign key.
delay = (
not field.is_relation and
not field_is_referenced(self, (app_label, model_name), (name, field))
)
self.reload_model(app_label, model_name, delay=delay)
    def rename_field(self, app_label, model_name, old_name, new_name):
        """Rename a field and fix every reference to the old field name.

        Raises FieldDoesNotExist if the model has no field *old_name*.
        """
        model_state = self.models[app_label, model_name]
        # Rename the field.
        fields = model_state.fields
        try:
            found = fields.pop(old_name)
        except KeyError:
            raise FieldDoesNotExist(
                f"{app_label}.{model_name} has no field named '{old_name}'"
            )
        fields[new_name] = found
        for field in fields.values():
            # Fix from_fields to refer to the new field.
            from_fields = getattr(field, 'from_fields', None)
            if from_fields:
                field.from_fields = tuple([
                    new_name if from_field_name == old_name else from_field_name
                    for from_field_name in from_fields
                ])
        # Fix index/unique_together to refer to the new field.
        options = model_state.options
        for option in ('index_together', 'unique_together'):
            if option in options:
                options[option] = [
                    [new_name if n == old_name else n for n in together]
                    for together in options[option]
                ]
        # Fix to_fields to refer to the new field.
        delay = True
        references = get_references(self, (app_label, model_name), (old_name, found))
        for *_, field, reference in references:
            # Any inbound reference means related models must be re-rendered
            # immediately rather than delayed.
            delay = False
            if reference.to:
                remote_field, to_fields = reference.to
                if getattr(remote_field, 'field_name', None) == old_name:
                    remote_field.field_name = new_name
                if to_fields:
                    field.to_fields = tuple([
                        new_name if to_field_name == old_name else to_field_name
                        for to_field_name in to_fields
                    ])
        self.reload_model(app_label, model_name, delay=delay)
    def _find_reload_model(self, app_label, model_name, delay=False):
        """Return the set of (app_label, model_name) keys that must be
        re-rendered when the given model changes.

        With delay=True only directly-related models are collected (and the
        state is marked delayed); otherwise the related set is computed
        recursively.
        """
        if delay:
            self.is_delayed = True
        related_models = set()
        try:
            old_model = self.apps.get_model(app_label, model_name)
        except LookupError:
            # Model not rendered yet; nothing related to collect from it.
            pass
        else:
            # Get all relations to and from the old model before reloading,
            # as _meta.apps may change
            if delay:
                related_models = get_related_models_tuples(old_model)
            else:
                related_models = get_related_models_recursive(old_model)
        # Get all outgoing references from the model to be rendered
        model_state = self.models[(app_label, model_name)]
        # Directly related models are the models pointed to by ForeignKeys,
        # OneToOneFields, and ManyToManyFields.
        direct_related_models = set()
        for field in model_state.fields.values():
            if field.is_relation:
                # Self-references don't add anything beyond the model itself.
                if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
                    continue
                rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
                direct_related_models.add((rel_app_label, rel_model_name.lower()))
        # For all direct related models recursively get all related models.
        related_models.update(direct_related_models)
        for rel_app_label, rel_model_name in direct_related_models:
            try:
                rel_model = self.apps.get_model(rel_app_label, rel_model_name)
            except LookupError:
                pass
            else:
                if delay:
                    related_models.update(get_related_models_tuples(rel_model))
                else:
                    related_models.update(get_related_models_recursive(rel_model))
        # Include the model itself
        related_models.add((app_label, model_name))
        return related_models
def reload_model(self, app_label, model_name, delay=False):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = self._find_reload_model(app_label, model_name, delay)
self._reload(related_models)
def reload_models(self, models, delay=True):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = set()
for app_label, model_name in models:
related_models.update(self._find_reload_model(app_label, model_name, delay))
self._reload(related_models)
    def _reload(self, related_models):
        """Unregister and re-render every model in *related_models*."""
        # Unregister all related models
        with self.apps.bulk_update():
            for rel_app_label, rel_model_name in related_models:
                self.apps.unregister_model(rel_app_label, rel_model_name)
        states_to_be_rendered = []
        # Gather all models states of those models that will be rerendered.
        # This includes:
        # 1. All related models of unmigrated apps
        for model_state in self.apps.real_models:
            if (model_state.app_label, model_state.name_lower) in related_models:
                states_to_be_rendered.append(model_state)
        # 2. All related models of migrated apps
        for rel_app_label, rel_model_name in related_models:
            try:
                model_state = self.models[rel_app_label, rel_model_name]
            except KeyError:
                # Related model lives in a real (unmigrated) app; already
                # handled above.
                pass
            else:
                states_to_be_rendered.append(model_state)
        # Render all models
        self.apps.render_multiple(states_to_be_rendered)
def update_model_field_relation(
self, model, model_key, field_name, field, concretes,
):
remote_model_key = resolve_relation(model, *model_key)
if remote_model_key[0] not in self.real_apps and remote_model_key in concretes:
remote_model_key = concretes[remote_model_key]
self.relations[remote_model_key][model_key].append((field_name, field))
def resolve_model_field_relations(
self, model_key, field_name, field, concretes=None,
):
remote_field = field.remote_field
if not remote_field:
return
if concretes is None:
concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
self.update_model_field_relation(
remote_field.model, model_key, field_name, field, concretes,
)
through = getattr(remote_field, 'through', None)
if not through:
return
self.update_model_field_relation(through, model_key, field_name, field, concretes)
def resolve_model_relations(self, model_key, concretes=None):
if concretes is None:
concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
model_state = self.models[model_key]
for field_name, field in model_state.fields.items():
self.resolve_model_field_relations(model_key, field_name, field, concretes)
def resolve_fields_and_relations(self):
# Resolve fields.
for model_state in self.models.values():
for field_name, field in model_state.fields.items():
field.name = field_name
# Resolve relations.
# {remote_model_key: {model_key: [(field_name, field)]}}
self.relations = defaultdict(partial(defaultdict, list))
concretes, proxies = self._get_concrete_models_mapping_and_proxy_models()
for model_key in concretes:
self.resolve_model_relations(model_key, concretes)
for model_key in proxies:
self.relations[model_key] = self.relations[concretes[model_key]]
def get_concrete_model_key(self, model):
concrete_models_mapping, _ = self._get_concrete_models_mapping_and_proxy_models()
model_key = make_model_tuple(model)
return concrete_models_mapping[model_key]
def _get_concrete_models_mapping_and_proxy_models(self):
concrete_models_mapping = {}
proxy_models = {}
# Split models to proxy and concrete models.
for model_key, model_state in self.models.items():
if model_state.options.get('proxy'):
proxy_models[model_key] = model_state
# Find a concrete model for the proxy.
concrete_models_mapping[model_key] = self._find_concrete_model_from_proxy(
proxy_models, model_state,
)
else:
concrete_models_mapping[model_key] = model_key
return concrete_models_mapping, proxy_models
    def _find_concrete_model_from_proxy(self, proxy_models, model_state):
        """Follow a proxy model's bases until a concrete model is found.

        *proxy_models* maps the proxy model keys seen so far to their
        states; any base missing from it is treated as concrete.
        """
        for base in model_state.bases:
            base_key = make_model_tuple(base)
            base_state = proxy_models.get(base_key)
            if not base_state:
                # Concrete model found, stop looking at bases.
                return base_key
        # Every base was itself a proxy: recurse via the last base examined
        # (`base_state` intentionally leaks out of the loop above).
        return self._find_concrete_model_from_proxy(proxy_models, base_state)
def clone(self):
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
new_state.is_delayed = self.is_delayed
return new_state
def clear_delayed_apps_cache(self):
if self.is_delayed and 'apps' in self.__dict__:
del self.__dict__['apps']
    @cached_property
    def apps(self):
        """Lazily build and cache a StateApps registry for this state."""
        return StateApps(self.real_apps, self.models)
@classmethod
def from_apps(cls, apps):
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
return self.models == other.models and self.real_apps == other.real_apps
class AppConfigStub(AppConfig):
    """Stub of an AppConfig. Only provides a label and a dict of models."""

    def __init__(self, label):
        # App-label and app-name are not the same thing, so technically
        # passing the label in as the name is wrong. In practice, migrations
        # don't care about the app name, but we need something unique, and
        # the label works fine.
        self.label = label
        self.name = label
        self.apps = None
        self.models = {}

    def import_models(self):
        """Fetch this app's model dict from the owning registry."""
        self.models = self.apps.all_models[self.label]
class StateApps(Apps):
    """
    Subclass of the global Apps registry class to better handle dynamic model
    additions and removals.
    """
    def __init__(self, real_apps, models, ignore_swappable=False):
        """
        Build a standalone app registry from ``real_apps`` (labels of
        unmigrated apps taken from the global registry) and ``models``
        (a {(app_label, model_name): ModelState} mapping), then render
        every model. Raises ValueError if unresolved lazy references
        remain after rendering.
        """
        # Any apps in self.real_apps should have all their models included
        # in the render. We don't use the original model instances as there
        # are some variables that refer to the Apps object.
        # FKs/M2Ms from real apps are also not included as they just
        # mess things up with partial states (due to lack of dependencies)
        self.real_models = []
        for app_label in real_apps:
            app = global_apps.get_app_config(app_label)
            for model in app.get_models():
                self.real_models.append(ModelState.from_model(model, exclude_rels=True))
        # Populate the app registry with a stub for each application.
        app_labels = {model_state.app_label for model_state in models.values()}
        app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])]
        super().__init__(app_configs)
        # These locks get in the way of copying as implemented in clone(),
        # which is called whenever Django duplicates a StateApps before
        # updating it.
        self._lock = None
        self.ready_event = None
        self.render_multiple([*models.values(), *self.real_models])
        # There shouldn't be any operations pending at this point.
        from django.core.checks.model_checks import _check_lazy_references
        ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
        errors = _check_lazy_references(self, ignore=ignore)
        if errors:
            raise ValueError("\n".join(error.msg for error in errors))

    @contextmanager
    def bulk_update(self):
        """Context manager suspending per-change cache invalidation."""
        # Avoid clearing each model's cache for each change. Instead, clear
        # all caches when we're finished updating the model instances.
        ready = self.ready
        self.ready = False
        try:
            yield
        finally:
            self.ready = ready
            self.clear_cache()

    def render_multiple(self, model_states):
        """Render all *model_states* into real model classes, retrying
        until base dependencies resolve or no progress is made."""
        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the size of the unrendered models doesn't
        # decrease by at least one, meaning there's a base dependency loop/
        # missing base.
        if not model_states:
            return
        # Prevent that all model caches are expired for each render.
        with self.bulk_update():
            unrendered_models = model_states
            while unrendered_models:
                new_unrendered_models = []
                for model in unrendered_models:
                    try:
                        model.render(self)
                    except InvalidBasesError:
                        # Base not rendered yet; retry on the next pass.
                        new_unrendered_models.append(model)
                if len(new_unrendered_models) == len(unrendered_models):
                    raise InvalidBasesError(
                        "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                        "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
                        "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
                        "for more" % (new_unrendered_models, get_docs_version())
                    )
                unrendered_models = new_unrendered_models

    def clone(self):
        """Return a clone of this registry."""
        clone = StateApps([], {})
        clone.all_models = copy.deepcopy(self.all_models)
        clone.app_configs = copy.deepcopy(self.app_configs)
        # Set the pointer to the correct app registry.
        for app_config in clone.app_configs.values():
            app_config.apps = clone
        # No need to actually clone them, they'll never change
        clone.real_models = self.real_models
        return clone

    def register_model(self, app_label, model):
        """Add *model* to the registry, creating an app stub if needed."""
        self.all_models[app_label][model._meta.model_name] = model
        if app_label not in self.app_configs:
            self.app_configs[app_label] = AppConfigStub(app_label)
            self.app_configs[app_label].apps = self
        self.app_configs[app_label].models[model._meta.model_name] = model
        self.do_pending_operations(model)
        self.clear_cache()

    def unregister_model(self, app_label, model_name):
        """Remove a model from the registry; ignore if already absent."""
        try:
            del self.all_models[app_label][model_name]
            del self.app_configs[app_label].models[model_name]
        except KeyError:
            pass
class ModelState:
    """
    Represent a Django Model. Don't use the actual Model class as it's not
    designed to have its options changed - instead, mutate this one and then
    render it into a Model as required.

    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """

    def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
        """
        ``fields`` is a mapping (or iterable of pairs) of field name to an
        *unbound* Field instance; ``options``/``bases``/``managers`` mirror
        the corresponding Model attributes. Sanity checks reject fields that
        are bound to a model or refer to model classes directly.
        """
        self.app_label = app_label
        self.name = name
        self.fields = dict(fields)
        self.options = options or {}
        self.options.setdefault('indexes', [])
        self.options.setdefault('constraints', [])
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        for name, field in self.fields.items():
            # Sanity-check that fields are NOT already bound to a model.
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )
            # Sanity-check that relation fields are NOT referring to a model class.
            if field.is_relation and hasattr(field.related_model, '_meta'):
                raise ValueError(
                    'ModelState.fields cannot refer to a model class - "%s.to" does. '
                    'Use a string reference instead.' % name
                )
            if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
                raise ValueError(
                    'ModelState.fields cannot refer to a model class - "%s.through" does. '
                    'Use a string reference instead.' % name
                )
        # Sanity-check that indexes have their name set.
        for index in self.options['indexes']:
            if not index.name:
                raise ValueError(
                    "Indexes passed to ModelState require a name attribute. "
                    "%r doesn't have one." % index
                )

    @cached_property
    def name_lower(self):
        """Lowercased model name, the canonical form used in state keys."""
        return self.name.lower()

    def get_field(self, field_name):
        """Return the field named *field_name*; '_order' resolves to the
        order_with_respect_to field."""
        field_name = (
            self.options['order_with_respect_to']
            if field_name == '_order'
            else field_name
        )
        return self.fields[field_name]

    @classmethod
    def from_model(cls, model, exclude_rels=False):
        """Given a model, return a ModelState representing it."""
        # Deconstruct the fields
        fields = []
        for field in model._meta.local_fields:
            if getattr(field, "remote_field", None) and exclude_rels:
                continue
            if isinstance(field, models.OrderWrt):
                # The implicit _order field is re-created at render time.
                continue
            name = field.name
            try:
                fields.append((name, field.clone()))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
                    name,
                    model._meta.label,
                    e,
                ))
        if not exclude_rels:
            for field in model._meta.local_many_to_many:
                name = field.name
                try:
                    fields.append((name, field.clone()))
                except TypeError as e:
                    raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                        name,
                        model._meta.object_name,
                        e,
                    ))
        # Extract the options
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                if name == "unique_together":
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_together(ut))
                elif name == "index_together":
                    it = model._meta.original_attrs["index_together"]
                    options[name] = set(normalize_together(it))
                elif name == "indexes":
                    indexes = [idx.clone() for idx in model._meta.indexes]
                    for index in indexes:
                        if not index.name:
                            index.set_name_with_model(model)
                    options['indexes'] = indexes
                elif name == 'constraints':
                    options['constraints'] = [con.clone() for con in model._meta.constraints]
                else:
                    options[name] = model._meta.original_attrs[name]
        # If we're ignoring relationships, remove all field-listing model
        # options (that option basically just means "make a stub model")
        if exclude_rels:
            for key in ["unique_together", "index_together", "order_with_respect_to"]:
                if key in options:
                    del options[key]
        # Private fields are ignored, so remove options that refer to them.
        elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
            del options['order_with_respect_to']

        def flatten_bases(model):
            # Replace abstract bases with their own (flattened) bases.
            bases = []
            for base in model.__bases__:
                if hasattr(base, "_meta") and base._meta.abstract:
                    bases.extend(flatten_bases(base))
                else:
                    bases.append(base)
            return bases

        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However by recursing on
        # __bases__ we may end up with duplicates and ordering issues, we
        # therefore discard any duplicates and reorder the bases according
        # to their index in the MRO.
        flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
        # Make our record
        bases = tuple(
            (
                base._meta.label_lower
                if hasattr(base, "_meta") else
                base
            )
            for base in flattened_bases
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model,)
        # Constructing the managers: keep migration-usable managers as-is,
        # shim the default/base managers with plain Manager instances.
        managers = []
        manager_names = set()
        default_manager_shim = None
        for manager in model._meta.managers:
            if manager.name in manager_names:
                # Skip overridden managers.
                continue
            elif manager.use_in_migrations:
                # Copy managers usable in migrations.
                new_manager = copy.copy(manager)
                new_manager._set_creation_counter()
            elif manager is model._base_manager or manager is model._default_manager:
                # Shim custom managers used as default and base managers.
                new_manager = models.Manager()
                new_manager.model = manager.model
                new_manager.name = manager.name
                if manager is model._default_manager:
                    default_manager_shim = new_manager
            else:
                continue
            manager_names.add(manager.name)
            managers.append((manager.name, new_manager))
        # Ignore a shimmed default manager called objects if it's the only one.
        if managers == [('objects', default_manager_shim)]:
            managers = []
        # Construct the new ModelState
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
            managers,
        )

    def construct_managers(self):
        """Deep-clone the managers using deconstruction."""
        # Sort all managers by their creation counter
        sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
        for mgr_name, manager in sorted_managers:
            as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
            if as_manager:
                # Manager was created via QuerySet.as_manager().
                qs_class = import_string(qs_path)
                yield mgr_name, qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                yield mgr_name, manager_class(*args, **kwargs)

    def clone(self):
        """Return an exact copy of this ModelState."""
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=dict(self.fields),
            # Since options are shallow-copied here, operations such as
            # AddIndex must replace their option (e.g 'indexes') rather
            # than mutating it.
            options=dict(self.options),
            bases=self.bases,
            managers=list(self.managers),
        )

    def render(self, apps):
        """Create a Model object from our current state into the given apps."""
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options}
        meta = type("Meta", (), meta_contents)
        # Then, work out our bases
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, str) else base)
                for base in self.bases
            )
        except LookupError:
            # A base isn't rendered yet; caller (render_multiple) retries.
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Clone fields for the body, add other bits.
        body = {name: field.clone() for name, field in self.fields.items()}
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Restore managers
        body.update(self.construct_managers())
        # Then, make a Model object (apps.register_model is called in __new__)
        return type(self.name, bases, body)

    def get_index_by_name(self, name):
        """Return the index named *name*; raise ValueError if missing."""
        for index in self.options['indexes']:
            if index.name == name:
                return index
        raise ValueError("No index named %s on model %s" % (name, self.name))

    def get_constraint_by_name(self, name):
        """Return the constraint named *name*; raise ValueError if missing."""
        for constraint in self.options['constraints']:
            if constraint.name == name:
                return constraint
        raise ValueError('No constraint named %s on model %s' % (name, self.name))

    def __repr__(self):
        return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)

    def __eq__(self, other):
        # Fields are compared via deconstruct() (minus the name element) so
        # bound/unbound differences don't affect equality.
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all(
                k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:]
                for (k1, f1), (k2, f2) in zip(
                    sorted(self.fields.items()),
                    sorted(other.fields.items()),
                )
            ) and
            (self.options == other.options) and
            (self.bases == other.bases) and
            (self.managers == other.managers)
        )
|
|
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.fftpack import fftn, fftshift, ifftshift
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class DiffusionSpectrumModel(OdfModel, Cache):

    def __init__(self,
                 gtab,
                 qgrid_size=17,
                 r_start=2.1,
                 r_end=6.,
                 r_step=0.2,
                 filter_width=32,
                 normalize_peaks=False):
        r""" Diffusion Spectrum Imaging

        The theoretical idea underlying this method is that the diffusion
        propagator $P(\mathbf{r})$ (probability density function of the average
        spin displacements) can be estimated by applying 3D FFT to the signal
        values $S(\mathbf{q})$

        ..math::
            :nowrap:
                \begin{eqnarray}
                    P(\mathbf{r}) & = & S_{0}^{-1}\int S(\mathbf{q})\exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{r}
                \end{eqnarray}

        where $\mathbf{r}$ is the displacement vector and $\mathbf{q}$ is the
        wavevector which corresponds to different gradient directions. Method
        used to calculate the ODFs. Here we implement the method proposed by
        Wedeen et. al [1]_.

        The main assumption for this model is fast gradient switching and that
        the acquisition gradients will sit on a keyhole Cartesian grid in
        q_space [3]_.

        Parameters
        ----------
        gtab : GradientTable,
            Gradient directions and bvalues container class
        qgrid_size : int,
            has to be an odd number. Sets the size of the q_space grid.
            For example if qgrid_size is 17 then the shape of the grid will be
            ``(17, 17, 17)``.
        r_start : float,
            ODF is sampled radially in the PDF. This parameters shows where the
            sampling should start.
        r_end : float,
            Radial endpoint of ODF sampling
        r_step : float,
            Step size of the ODF sampling from r_start to r_end
        filter_width : float,
            Strength of the hanning filter

        References
        ----------
        .. [1] Wedeen V.J et. al, "Mapping Complex Tissue Architecture With
        Diffusion Spectrum Magnetic Resonance Imaging", MRM 2005.

        .. [2] Canales-Rodriguez E.J et. al, "Deconvolution in Diffusion
        Spectrum Imaging", Neuroimage, 2010.

        .. [3] Garyfallidis E, "Towards an accurate brain tractography", PhD
        thesis, University of Cambridge, 2012.

        Examples
        --------
        In this example where we provide the data, a gradient table
        and a reconstruction sphere, we calculate generalized FA for the first
        voxel in the data with the reconstruction performed using DSI.

        >>> from dipy.data import dsi_voxels, get_sphere
        >>> data, gtab = dsi_voxels()
        >>> sphere = get_sphere('symmetric724')
        >>> from dipy.reconst.dsi import DiffusionSpectrumModel
        >>> ds = DiffusionSpectrumModel(gtab)
        >>> dsfit = ds.fit(data)
        >>> from dipy.reconst.odf import gfa
        >>> np.round(gfa(dsfit.odf(sphere))[0, 0, 0], 2)
        0.11

        Notes
        ------
        A. Have in mind that DSI expects gradients on both hemispheres. If your
        gradients span only one hemisphere you need to duplicate the data and
        project them to the other hemisphere before calling this class. The
        function dipy.reconst.dsi.half_to_full_qspace can be used for this
        purpose.

        B. If you increase the size of the grid (parameter qgrid_size) you will
        most likely also need to update the r_* parameters. This is because
        the added zero padding from the increase of gqrid_size also introduces
        a scaling of the PDF.

        C. We assume that the data contain only one b0 volume.

        See Also
        --------
        dipy.reconst.gqi.GeneralizedQSampling
        """
        self.bvals = gtab.bvals
        self.bvecs = gtab.bvecs
        self.normalize_peaks = normalize_peaks
        # 3d volume for Sq
        if qgrid_size % 2 == 0:
            raise ValueError('qgrid_size needs to be an odd integer')
        self.qgrid_size = qgrid_size
        # necessary shifting for centering
        self.origin = self.qgrid_size // 2
        # hanning filter width
        self.filter = hanning_filter(gtab, filter_width)
        # odf sampling radius
        self.qradius = np.arange(r_start, r_end, r_step)
        self.qradiusn = len(self.qradius)
        # create qspace grid
        self.qgrid = create_qspace(gtab, self.origin)
        # number of diffusion-weighted volumes (everything above the
        # smallest b-value, which is taken to be the b0)
        b0 = np.min(self.bvals)
        self.dn = (self.bvals > b0).sum()
        self.gtab = gtab

    @multi_voxel_fit
    def fit(self, data):
        """Fit DSI voxel-wise; returns a DiffusionSpectrumFit per voxel."""
        return DiffusionSpectrumFit(self, data)
class DiffusionSpectrumFit(OdfFit):

    def __init__(self, model, data):
        """ Calculates PDF and ODF and other properties for a single voxel

        Parameters
        ----------
        model : object,
            DiffusionSpectrumModel
        data : 1d ndarray,
            signal values
        """
        self.model = model
        self.data = data
        # Cache grid size and number of DW directions from the model.
        self.qgrid_sz = self.model.qgrid_size
        self.dn = self.model.dn
        # Lazily-populated attributes used by peak-extraction utilities.
        self._gfa = None
        self.npeaks = 5
        self._peak_values = None
        self._peak_indices = None

    def pdf(self, normalized=True):
        """ Applies the 3D FFT in the q-space grid to generate
        the diffusion propagator

        Parameters
        ----------
        normalized : boolean
            default true, normalize the propagator by its sum in order to
            obtain a pdf
        """
        values = self.data * self.model.filter
        # create the signal volume
        Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
        # fill q-space (+= accumulates duplicate grid coordinates)
        for i in range(len(values)):
            qx, qy, qz = self.model.qgrid[i]
            Sq[qx, qy, qz] += values[i]
        # apply fourier transform
        Pr = fftshift(np.real(fftn(ifftshift(Sq),
                      3 * (self.qgrid_sz, ))))
        # clipping negative values to 0 (ringing artefact)
        Pr = np.clip(Pr, 0, Pr.max())
        # normalize the propagator to obtain a pdf
        if normalized:
            Pr /= Pr.sum()
        return Pr

    def rtop_signal(self, filtering=True):
        """ Calculates the return to origin probability (rtop) from the signal
        rtop equals to the sum of all signal values

        Parameters
        ----------
        filtering : boolean
            default true, perform the hanning filtering

        Returns
        -------
        rtop : float
            the return to origin probability
        """
        if filtering:
            values = self.data * self.model.filter
        else:
            values = self.data
        rtop = values.sum()
        return rtop

    def rtop_pdf(self, normalized=True):
        r""" Calculates the return to origin probability from the propagator, which is
        the propagator evaluated at zero (see Descoteaux et Al. [1]_, Tuch [2]_, Wu et al. [3]_)
        rtop = P(0)

        Parameters
        ----------
        normalized : boolean
            default true, normalize the propagator by its sum in order to obtain a pdf

        Returns
        -------
        rtop : float
            the return to origin probability

        References
        ----------
        .. [1] Descoteaux M. et. al, "Multiple q-shell diffusion propagator
        imaging", Medical Image Analysis, vol 15, No. 4, p. 603-621, 2011.

        .. [2] Tuch D.S., "Diffusion MRI of Complex Tissue Structure",
        PhD Thesis, 2002.

        .. [3] Wu Y. et. al, "Computation of Diffusion Function Measures
        in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
        IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865, 2008
        """
        Pr = self.pdf(normalized=normalized)
        # P(0) lives at the center of the (odd-sized) grid.
        center = self.qgrid_sz // 2
        rtop = Pr[center, center, center]
        return rtop

    def msd_discrete(self, normalized=True):
        r""" Calculates the mean squared displacement on the discrete propagator

        ..math::
            :nowrap:
                \begin{equation}
                    MSD:{DSI}=\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty} P(\hat{\mathbf{r}}) \cdot \hat{\mathbf{r}}^{2} \ dr_x \ dr_y \ dr_z
                \end{equation}

        where $\hat{\mathbf{r}}$ is a point in the 3D Propagator space (see Wu et. al [1]_).

        Parameters
        ----------
        normalized : boolean
            default true, normalize the propagator by its sum in order to obtain a pdf

        Returns
        -------
        msd : float
            the mean square displacement

        References
        ----------
        .. [1] Wu Y. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
        p. 617-629, 2007.
        """
        Pr = self.pdf(normalized=normalized)
        # create the r squared 3D matrix (distance from grid center on
        # each axis, combined into squared radius)
        gridsize = self.qgrid_sz
        center = gridsize // 2
        a = np.arange(gridsize) - center
        x = np.tile(a, (gridsize, gridsize, 1))
        y = np.tile(a.reshape(gridsize, 1), (gridsize, 1, gridsize))
        z = np.tile(a.reshape(gridsize, 1, 1), (1, gridsize, gridsize))
        r2 = x ** 2 + y ** 2 + z ** 2
        msd = np.sum(Pr * r2) / float((gridsize ** 3))
        return msd

    def odf(self, sphere):
        r""" Calculates the real discrete odf for a given discrete sphere

        ..math::
            :nowrap:
                \begin{equation}
                    \psi_{DSI}(\hat{\mathbf{u}})=\int_{0}^{\infty}P(r\hat{\mathbf{u}})r^{2}dr
                \end{equation}

        where $\hat{\mathbf{u}}$ is the unit vector which corresponds to a
        sphere point.
        """
        # Interpolation coordinates are expensive; cache them per sphere.
        interp_coords = self.model.cache_get('interp_coords',
                                             key=sphere)
        if interp_coords is None:
            interp_coords = pdf_interp_coords(sphere,
                                              self.model.qradius,
                                              self.model.origin)
            self.model.cache_set('interp_coords', sphere, interp_coords)
        Pr = self.pdf()
        # calculate the orientation distribution function
        return pdf_odf(Pr, self.model.qradius, interp_coords)
def create_qspace(gtab, origin):
    """ create the 3D grid which holds the signal values (q-space)

    Parameters
    ----------
    gtab : GradientTable
    origin : (3,) ndarray
        center of the qspace

    Returns
    -------
    qgrid : ndarray
        qspace coordinates
    """
    # Shift the integer q-table so the origin sits at the grid center; the
    # result can be used directly as array indices.
    return (create_qtable(gtab) + origin).astype('i8')
def create_qtable(gtab):
    """Return rounded q-space coordinates for the gradients.

    b-values are rescaled by the smallest non-zero b-value (taken as the
    second entry of the sorted b-values, i.e. a single b0 is assumed),
    square-rooted into radii and multiplied with the gradient directions,
    then rounded to the nearest integer.
    """
    bvals = np.asarray(gtab.bvals)
    # Smallest non-zero b-value — assumes exactly one b0 measurement.
    bmin = np.sort(bvals)[1]
    radii = np.sqrt(bvals / bmin)
    qtable = radii[:, None] * gtab.bvecs
    return np.floor(qtable + .5)
def hanning_filter(gtab, filter_width):
    """Per-direction Hanning window weights for the q-space signal.

    The signal is premultiplied by a Hanning window before the Fourier
    transform in order to ensure a smooth attenuation of the signal at
    high q values.

    Parameters
    ----------
    gtab : GradientTable
    filter_width : int

    Returns
    -------
    filter : (N,) ndarray
        where N is the number of non-b0 gradient directions
    """
    qtable = create_qtable(gtab)
    # r: distance of each q-space sample from the origin (the hanning
    # filter's free parameter).
    r = np.sqrt((qtable ** 2).sum(axis=-1))
    return .5 * np.cos(2 * np.pi * r / filter_width)
def pdf_interp_coords(sphere, rradius, origin):
    """ Precompute coordinates for ODF calculation from the PDF

    Parameters
    ----------
    sphere : object,
        Sphere
    rradius : array, shape (N,)
        line interpolation points
    origin : array, shape (3,)
        center of the grid

    Returns
    -------
    interp_coords : array, shape (3, M, N)
        grid coordinates of each radial sample along every sphere vertex
    """
    # Scale each unit vertex (3, M, 1) by each radius -> (3, M, N), then
    # shift so radius zero lands on the grid center.
    rays = rradius * sphere.vertices[np.newaxis].T
    return np.reshape(origin, [-1, 1, 1]) + rays
def pdf_odf(Pr, rradius, interp_coords):
    r""" Calculates the real ODF from the diffusion propagator(PDF) Pr

    Parameters
    ----------
    Pr : array, shape (X, X, X)
        probability density function
    rradius : array, shape (N,)
        interpolation range on the radius
    interp_coords : array, shape (3, M, N)
        coordinates in the pdf for interpolating the odf
    """
    # Sample Pr along each ray by linear interpolation, weight by r ** 2
    # (spherical volume element) and integrate over the radius axis.
    samples = map_coordinates(Pr, interp_coords, order=1)
    return (samples * rradius ** 2).sum(-1)
def half_to_full_qspace(data, gtab):
    """ Half to full Cartesian grid mapping

    Useful when dMRI data are provided in one qspace hemisphere as
    DiffusionSpectrum expects data to be in full qspace.

    Parameters
    ----------
    data : array, shape (X, Y, Z, W)
        where (X, Y, Z) volume size and W number of gradient directions
    gtab : GradientTable
        container for b-values and b-vectors (gradient directions)

    Returns
    -------
    new_data : array, shape (X, Y, Z, 2 * W - 1)
    new_gtab : GradientTable

    Notes
    -----
    We assume here that only one b0 is provided with the initial data. If
    that is not the case then you will need to write your own preparation
    function before providing the gradients and the data to the
    DiffusionSpectrumModel class.
    """
    # Mirror every non-b0 measurement onto the opposite hemisphere: same
    # b-value and signal, negated gradient direction.
    bvals = np.append(gtab.bvals, gtab.bvals[1:])
    bvecs = np.append(gtab.bvecs, -gtab.bvecs[1:], axis=0)
    new_data = np.append(data, data[..., 1:], axis=-1)
    gtab.bvals = bvals.copy()
    gtab.bvecs = bvecs.copy()
    return new_data, gtab
def project_hemisph_bvecs(gtab):
    """ Project any near identical bvecs to the other hemisphere

    Parameters
    ----------
    gtab : object,
        GradientTable

    Returns
    -------
    bvecs2 : ndarray
        copy of ``gtab.bvecs`` with one member of each near-duplicate pair
        flipped to the opposite hemisphere
    pairs : list of (int, int)
        indices (into the non-b0 gradients) of each matched pair

    Notes
    -------
    Useful only when working with some types of dsi data.
    """
    bvals = gtab.bvals
    bvecs = gtab.bvecs
    # Work on the non-b0 gradients only (index 0 is assumed to be the b0).
    bvs = bvals[1:]
    bvcs = bvecs[1:]
    # b-value-weighted gradient vectors.
    b = bvs[:, None] * bvcs
    # bb[i, j]: pairwise Euclidean distances between weighted gradients.
    bb = np.zeros((len(bvs), len(bvs)))
    pairs = []
    for (i, vec) in enumerate(b):
        for (j, vec2) in enumerate(b):
            bb[i, j] = np.sqrt(np.sum((vec - vec2) ** 2))
        # Nearest neighbour of i: argsort puts i itself first (distance 0),
        # so take the first index that differs from i. NOTE: `j` is
        # intentionally read after the loop below terminates via `break`.
        I = np.argsort(bb[i])
        for j in I:
            if j != i:
                break
        if (j, i) in pairs:
            # Pair already recorded from the other side; skip it.
            pass
        else:
            pairs.append((i, j))
    # Flip the second member of each pair to the opposite hemisphere
    # (the +1 offset restores indexing that includes the b0 entry).
    bvecs2 = bvecs.copy()
    for (i, j) in pairs:
        bvecs2[1 + j] = - bvecs2[1 + j]
    return bvecs2, pairs
class DiffusionSpectrumDeconvModel(DiffusionSpectrumModel):

    def __init__(self, gtab, qgrid_size=35, r_start=4.1, r_end=13.,
                 r_step=0.4, filter_width=np.inf, normalize_peaks=False):
        r""" Diffusion Spectrum Deconvolution

        The idea is to remove the convolution on the DSI propagator that is
        caused by the truncation of the q-space in the DSI sampling.

        ..math::
            :nowrap:
                \begin{eqnarray*}
                    P_{dsi}(\mathbf{r}) & = & S_{0}^{-1}\iiint\limits_{\| \mathbf{q} \| \le \mathbf{q_{max}}} S(\mathbf{q})\exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \\
                    & = & S_{0}^{-1}\iiint\limits_{\mathbf{q}} \left( S(\mathbf{q}) \cdot M(\mathbf{q}) \right) \exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \\
                    & = & P(\mathbf{r}) \otimes \left( S_{0}^{-1}\iiint\limits_{\mathbf{q}} M(\mathbf{q}) \exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \right) \\
                \end{eqnarray*}

        where $\mathbf{r}$ is the displacement vector and $\mathbf{q}$ is the
        wavevector which corresponds to different gradient directions,
        $M(\mathbf{q})$ is a mask corresponding to your q-space sampling and
        $\otimes$ is the convolution operator [1]_.

        Parameters
        ----------
        gtab : GradientTable,
            Gradient directions and bvalues container class
        qgrid_size : int,
            has to be an odd number. Sets the size of the q_space grid.
            For example if qgrid_size is 35 then the shape of the grid will be
            ``(35, 35, 35)``.
        r_start : float,
            ODF is sampled radially in the PDF. This parameters shows where the
            sampling should start.
        r_end : float,
            Radial endpoint of ODF sampling
        r_step : float,
            Step size of the ODF sampling from r_start to r_end
        filter_width : float,
            Strength of the hanning filter

        References
        ----------
        .. [1] Canales-Rodriguez E.J et. al, "Deconvolution in Diffusion
        Spectrum Imaging", Neuroimage, 2010.

        .. [2] Biggs David S.C. et. al, "Acceleration of Iterative Image
        Restoration Algorithms", Applied Optics, vol. 36, No. 8, p. 1766-1775,
        1997.
        """
        # Defaults differ from plain DSI: larger grid, wider radial range
        # and an effectively disabled hanning filter (infinite width).
        DiffusionSpectrumModel.__init__(self, gtab, qgrid_size,
                                        r_start, r_end, r_step,
                                        filter_width,
                                        normalize_peaks)

    @multi_voxel_fit
    def fit(self, data):
        """Fit DSI-deconv voxel-wise; returns DiffusionSpectrumDeconvFit."""
        return DiffusionSpectrumDeconvFit(self, data)
class DiffusionSpectrumDeconvFit(DiffusionSpectrumFit):

    def pdf(self, normalized=True):
        """ Applies the 3D FFT in the q-space grid to generate
        the DSI diffusion propagator, remove the background noise with a
        hard threshold and then deconvolve the propagator with the
        Lucy-Richardson deconvolution algorithm

        Parameters
        ----------
        normalized : boolean
            Accepted for signature compatibility with
            ``DiffusionSpectrumFit.pdf`` — the inherited ``rtop_pdf()`` and
            ``msd_discrete()`` call ``self.pdf(normalized=...)`` and would
            otherwise raise TypeError. The Lucy-Richardson deconvolution
            always returns a unit-sum propagator, so this argument does not
            change the result.
        """
        values = self.data
        # create the signal volume
        Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
        # fill q-space
        for i in range(len(values)):
            qx, qy, qz = self.model.qgrid[i]
            Sq[qx, qy, qz] += values[i]
        # get deconvolution PSF (cached per gradient table — it only
        # depends on the sampling mask, not on the data)
        DSID_PSF = self.model.cache_get('deconv_psf', key=self.model.gtab)
        if DSID_PSF is None:
            DSID_PSF = gen_PSF(self.model.qgrid, self.qgrid_sz,
                               self.qgrid_sz, self.qgrid_sz)
            self.model.cache_set('deconv_psf', self.model.gtab, DSID_PSF)
        # apply fourier transform
        Pr = fftshift(np.abs(np.real(fftn(ifftshift(Sq),
                      3 * (self.qgrid_sz, )))))
        # threshold propagator to suppress background noise
        Pr = threshold_propagator(Pr)
        # apply LR deconvolution (returns a normalized propagator)
        Pr = LR_deconv(Pr, DSID_PSF, 5, 2)
        return Pr
def threshold_propagator(P, estimated_snr=15.):
    """
    Hard-threshold the propagator to remove background noise for the
    deconvolution: values below ``max(P) / estimated_snr`` are zeroed and
    the result is renormalized to unit sum. The input is not modified.
    """
    threshold = P.max() / float(estimated_snr)
    thresholded = np.where(P < threshold, 0, P)
    return thresholded / thresholded.sum()
def gen_PSF(qgrid_sampling, siz_x, siz_y, siz_z):
    """
    Generate a PSF for DSI Deconvolution: take the inverse FFT of the
    binary q-space sampling mask and keep only the sampled locations by
    masking the result with the same binary grid.
    """
    mask = np.zeros((siz_x, siz_y, siz_z))
    # Mark every sampled q-space location with a one.
    for qx, qy, qz in qgrid_sampling:
        mask[qx, qy, qz] = 1
    psf = np.real(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(mask))))
    return mask * psf
def LR_deconv(prop, psf, numit=5, acc_factor=1):
    r"""
    Perform Lucy-Richardson deconvolution algorithm on a 3D array.

    Parameters
    ----------
    prop : 3-D ndarray of dtype float
        The 3D volume to be deconvolved.
    psf : 3-D ndarray of dtype float
        The filter that will be used for the deconvolution.
    numit : int
        Number of Lucy-Richardson iterations to perform.
    acc_factor : float
        Exponential acceleration factor as in [1]_.

    References
    ----------
    .. [1] Biggs David S.C. et. al, "Acceleration of Iterative Image
    Restoration Algorithms", Applied Optics, vol. 36, No. 8, p. 1766-1775,
    1997.
    """
    eps = 1e-16
    # Embed the PSF at the center of a zero volume the same size as prop
    # and turn it into an optical transfer function (OTF).
    padded = np.zeros_like(prop)
    window = tuple(
        slice(big // 2 - small // 2, big // 2 + small // 2 + 1)
        for big, small in zip(padded.shape, psf.shape)
    )
    padded[window] = psf
    otf = np.real(np.fft.fftn(np.fft.ifftshift(padded)))
    # Enforce Positivity of the input.
    prop = np.clip(prop, 0, np.inf)
    estimate = prop.copy()
    for _ in range(numit):
        # Blur the current estimate with the PSF.
        blurred = np.real(np.fft.ifftn(otf * np.fft.fftn(estimate)))
        blurred[blurred < eps] = eps
        # Multiplicative LR update, exponentiated for acceleration.
        estimate = estimate * (np.real(np.fft.ifftn(
            otf * np.fft.fftn((prop / blurred) + eps)))) ** acc_factor
        # Enforce positivity after each update.
        estimate = np.clip(estimate, 0, np.inf)
    return estimate / estimate.sum()
if __name__ == '__main__':
    # Library module only; no demo/CLI entry point is provided.
    pass
|
|
"""
This implements resources for twisted webservers using the wsgi
interface of django. This alleviates the need to run e.g. an
apache server to serve Evennia's web presence (although you could do
that too if desired).
The actual servers are started inside server.py as part of the Evennia
application.
(Lots of thanks to http://github.com/clemensha/twisted-wsgi-django for
a great example/aid on how to do this.)
"""
import urlparse
from urllib import quote as urlquote
from twisted.web import resource, http, server
from twisted.internet import reactor
from twisted.application import internet
from twisted.web.proxy import ReverseProxyResource
from twisted.web.server import NOT_DONE_YET
from twisted.python import threadpool
from twisted.internet import defer
from twisted.web.wsgi import WSGIResource
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from evennia.utils import logger
_UPSTREAM_IPS = settings.UPSTREAM_IPS
_DEBUG = settings.DEBUG
class LockableThreadPool(threadpool.ThreadPool):
    """
    A twisted ThreadPool that can be locked so it stops accepting new
    work, while requests already queued are allowed to finish.
    """

    def __init__(self, *args, **kwargs):
        self._accept_new = True
        threadpool.ThreadPool.__init__(self, *args, **kwargs)

    def lock(self):
        """Stop accepting new requests; queued work is unaffected."""
        self._accept_new = False

    def callInThread(self, func, *args, **kwargs):
        """
        Called in the main reactor thread. Silently drops the call if
        the pool has been locked; otherwise queues it as usual.
        """
        if not self._accept_new:
            return
        threadpool.ThreadPool.callInThread(self, func, *args, **kwargs)
#
# X-Forwarded-For Handler
#
class HTTPChannelWithXForwardedFor(http.HTTPChannel):
    """
    HTTPChannel that honors the X-Forwarded-For header.

    When the direct peer is a trusted upstream proxy (listed in
    settings.UPSTREAM_IPS), the transport's client address is replaced
    with the originating client address reported by the proxy.
    """

    def allHeadersReceived(self):
        """
        Check to see if this is a reverse proxied connection.
        """
        http.HTTPChannel.allHeadersReceived(self)
        req = self.requests[-1]
        client_ip, port = self.transport.client
        proxy_chain = req.getHeader('X-FORWARDED-FOR')
        if proxy_chain and client_ip in _UPSTREAM_IPS:
            # The header is a comma-separated proxy chain; the first entry
            # is the originating client. Split on ',' (not ', ') and strip,
            # since proxies are not required to put a space after commas.
            forwarded = proxy_chain.split(',', 1)[0].strip()
            self.transport.client = (forwarded, port)
# Monkey-patch Twisted to handle X-Forwarded-For: every HTTPFactory-based
# site created after this point will speak the proxy-aware channel above.
http.HTTPFactory.protocol = HTTPChannelWithXForwardedFor
class EvenniaReverseProxyResource(ReverseProxyResource):
    """
    Reverse-proxy resource that forwards requests to the proxied
    server while suppressing tracebacks from clients that disconnect
    before the request finishes.
    """

    def getChild(self, path, request):
        """
        Create and return a proxy resource with the same proxy configuration
        as this one, except that its path also contains the segment given by
        path at the end.

        Args:
            path (str): Url path.
            request (Request object): Incoming request.

        Return:
            resource (EvenniaReverseProxyResource): A proxy resource.
        """
        # Swallow "connection lost" errors so an aborted client request
        # does not spam the log with tracebacks.
        # NOTE(review): the ":75" in the message looks like a stale line
        # reference -- confirm against the file history.
        request.notifyFinish().addErrback(
            lambda f: logger.log_trace("%s\nCaught errback in webserver.py:75." % f))
        return EvenniaReverseProxyResource(
            self.host, self.port, self.path + '/' + urlquote(path, safe=""),
            self.reactor)

    def render(self, request):
        """
        Render a request by forwarding it to the proxied server.

        Args:
            request (Request): Incoming request.

        Returns:
            not_done (char): Indicator to note request not yet finished.
        """
        # RFC 2616 tells us that we can omit the port if it's the default port,
        # but we have to provide it otherwise
        # Rewind the body stream so the proxied request gets the full content.
        request.content.seek(0, 0)
        # Index 4 of the urlparse 6-tuple is the query string.
        qs = urlparse.urlparse(request.uri)[4]
        if qs:
            rest = self.path + '?' + qs
        else:
            rest = self.path
        clientFactory = self.proxyClientFactoryClass(
            request.method, rest, request.clientproto,
            request.getAllHeaders(), request.content.read(), request)
        clientFactory.noisy = False
        self.reactor.connectTCP(self.host, self.port, clientFactory)
        # don't trigger traceback if connection is lost before request finish.
        request.notifyFinish().addErrback(
            lambda f: logger.log_trace("%s\nCaught errback in webserver.py:75." % f))
        return NOT_DONE_YET
#
# Website server resource
#
class DjangoWebRoot(resource.Resource):
    """
    This creates a web root (/) that Django
    understands by tweaking the way
    child instances are recognized.
    """

    def __init__(self, pool):
        """
        Setup the django+twisted resource.

        Args:
            pool (ThreadPool): The twisted threadpool.
        """
        self.pool = pool
        # Only echo the "waiting for requests" message once per shutdown.
        self._echo_log = True
        # Maps each request's notifyFinish deferred to itself; used as a
        # set of in-flight requests so the pool can drain before a reload.
        self._pending_requests = {}
        resource.Resource.__init__(self)
        self.wsgi_resource = WSGIResource(reactor, pool, WSGIHandler())

    def empty_threadpool(self):
        """
        Converts our _pending_requests list of deferreds into a DeferredList

        Returns:
            deflist (DeferredList): Contains all deferreds of pending requests.
        """
        # Refuse new work; already-queued requests will still complete.
        self.pool.lock()
        if self._pending_requests and self._echo_log:
            self._echo_log = False  # just to avoid multiple echoes
            msg = "Webserver waiting for %i requests ... "
            logger.log_info(msg % len(self._pending_requests))
        # Iterating the dict yields its keys, i.e. the deferreds themselves.
        return defer.DeferredList(self._pending_requests, consumeErrors=True)

    def _decrement_requests(self, *args, **kwargs):
        # Remove the finished request's deferred from the pending map.
        self._pending_requests.pop(kwargs.get('deferred', None), None)

    def getChild(self, path, request):
        """
        To make things work we nudge the url tree to make this the
        root.

        Args:
            path (str): Url path.
            request (Request object): Incoming request.

        Notes:
            We make sure to save the request queue so
            that we can safely kill the threadpool
            on a server reload.
        """
        # Push the consumed path segment back so Django sees the full URL.
        path0 = request.prepath.pop(0)
        request.postpath.insert(0, path0)
        deferred = request.notifyFinish()
        self._pending_requests[deferred] = deferred
        # addBoth: unregister on success AND failure of the request.
        deferred.addBoth(self._decrement_requests, deferred=deferred)
        return self.wsgi_resource
#
# Site with deactivateable logging
#
class Website(server.Site):
    """
    A twisted Site that only logs http requests when settings.DEBUG
    is enabled.
    """
    noisy = False

    def log(self, request):
        """Log the request only in DEBUG mode."""
        if not _DEBUG:
            return
        server.Site.log(self, request)
#
# Threaded Webserver
#
class WSGIWebServer(internet.TCPServer):
    """
    A WSGI webserver service.

    The threadpool is brought up only after the TCP service itself has
    started, so that it registers correctly with the twisted daemon.
    Instantiate as WSGIWebServer(threadpool, port, wsgi_resource).
    """

    def __init__(self, pool, *args, **kwargs):
        """
        Store the threadpool and pass everything else to TCPServer.

        Args:
            pool (ThreadPool): The twisted threadpool.
            args, kwargs (any): Passed on to the TCPServer.
        """
        self.pool = pool
        internet.TCPServer.__init__(self, *args, **kwargs)

    def startService(self):
        """Bring up the TCP service first, then the threadpool."""
        internet.TCPServer.startService(self)
        self.pool.start()

    def stopService(self):
        """Stop the TCP service first, then safely stop the threadpool."""
        internet.TCPServer.stopService(self)
        self.pool.stop()
|
|
#!/usr/bin/env python3
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import yaml
from params import Inputs, TestCases, ATTRIBUTE_CLUSTER_DNS, PARAMETERS
def make_mock_yaml():
    """Return a parsed kube-dns deployment spec used as a test fixture."""
    fixture = """
spec:
  template:
    spec:
      containers:
      - name: kubedns
        args: []
        resources:
          limits:
            cpu: 0m
      - name: dnsmasq
        args: []
        resources:
          limits:
            cpu: 0m
"""
    return yaml.safe_load(fixture)
def make_mock_coredns_configmap_yaml():
    """Return a parsed CoreDNS ConfigMap (Corefile) used as a test fixture."""
    fixture = """
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          upstream
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30 {
            success 1000
            denial 1000
        }
    }
"""
    return yaml.safe_load(fixture)
def make_mock_coredns_deployment_yaml():
    """Return a parsed CoreDNS deployment spec used as a test fixture."""
    fixture = """
spec:
  template:
    spec:
      containers:
      - name: coredns
        args: []
        resources:
          limits:
            cpu: 0m
"""
    return yaml.safe_load(fixture)
class ParamsTest(unittest.TestCase):
  """Tests for the parameter plumbing in params.py."""

  def test_params(self):
    # Set every supported kube-dns/dnsmasq parameter and verify where
    # each value lands in the deployment yaml / dnsperf command line.
    values = {
        'dnsmasq_cpu': 100,
        'dnsmasq_cache': 200,
        'kubedns_cpu': 300,
        'max_qps': 400,
        'query_file': 'abc',
        'run_length_seconds': 120,
    }
    inputs = Inputs(make_mock_yaml(), None, [])
    for param in PARAMETERS:
      if param.name not in values:
        continue
      param.set(inputs, values[param.name])
    # dnsmasq is containers[1]; cpu limits are rendered in millicores.
    self.assertEqual(
        '100m',
        inputs.deployment_yaml['spec']['template']['spec']['containers']\
            [1]['resources']['limits']['cpu'])
    self.assertTrue(
        '--cache-size=200' in
        inputs.deployment_yaml['spec']['template']['spec']['containers']\
            [1]['args'])
    # kubedns is containers[0].
    self.assertEqual(
        '300m',
        inputs.deployment_yaml['spec']['template']['spec']['containers']\
            [0]['resources']['limits']['cpu'])
    self.assertEqual(
        '-l,120,-Q,400,-d,/queries/abc',
        ','.join(inputs.dnsperf_cmdline))

  def test_coredns_params(self):
    # CoreDNS parameters patch both the deployment and the Corefile.
    values = {
        'coredns_cpu': 100,
        'coredns_cache': 200,
    }
    inputs = Inputs(make_mock_coredns_deployment_yaml(),
                    make_mock_coredns_configmap_yaml(), [])
    for param in PARAMETERS:
      if param.name not in values:
        continue
      param.set(inputs, values[param.name])
    self.assertTrue("success 200"
                    in inputs.configmap_yaml['data']['Corefile'])
    self.assertTrue("denial 200"
                    in inputs.configmap_yaml['data']['Corefile'])
    self.assertEqual(
        '100m',
        inputs.deployment_yaml['spec']['template']['spec']['containers']
        [0]['resources']['limits']['cpu'])

  def test_null_params(self):
    # These should result in no limits.
    values = {
        'dnsmasq_cpu': None,
        'dnsmasq_cache': 100,
        'kubedns_cpu': None,
        'max_qps': None,
        'query_file': 'abc',
        'run_length_seconds': 120,
    }
    inputs = Inputs(make_mock_yaml(), None, [])
    for param in PARAMETERS:
      if param.name not in values:
        continue
      param.set(inputs, values[param.name])
    # A None cpu value must remove the 'cpu' key entirely.
    self.assertTrue(
        'cpu' not in inputs.deployment_yaml\
        ['spec']['template']['spec']['containers'][0]['resources']['limits'])
    self.assertTrue(
        'cpu' not in inputs.deployment_yaml\
        ['spec']['template']['spec']['containers'][1]['resources']['limits'])
    # No -Q flag when max_qps is None.
    self.assertEqual(
        '-l,120,-d,/queries/abc',
        ','.join(inputs.dnsperf_cmdline))

  def test_TestCases(self):
    # Cartesian product of 1 x 2 x 2 values -> four test cases, with
    # run_subid assigned in generation order.
    tp = TestCases({
        'kubedns_cpu': [100],
        'dnsmasq_cpu': [200, 300],
        'query_file': ['a', 'b'],
    })
    tc = tp.generate(set())
    self.assertEqual(4, len(tc))
    self.assertEqual(0, tc[0].run_subid)
    self.assertEqual(
        "[(<dnsmasq_cpu>, 200), (<kubedns_cpu>, 100), (<query_file>, 'a')]",
        str(tc[0].pv))
    self.assertEqual(
        "[(<dnsmasq_cpu>, 200), (<kubedns_cpu>, 100), (<query_file>, 'b')]",
        str(tc[1].pv))
    self.assertEqual(1, tc[1].run_subid)
    self.assertEqual(
        "[(<dnsmasq_cpu>, 300), (<kubedns_cpu>, 100), (<query_file>, 'a')]",
        str(tc[2].pv))
    self.assertEqual(2, tc[2].run_subid)
    self.assertEqual(
        "[(<dnsmasq_cpu>, 300), (<kubedns_cpu>, 100), (<query_file>, 'b')]",
        str(tc[3].pv))
    self.assertEqual(3, tc[3].run_subid)

  def test_TestCases_attributes(self):
    # With ATTRIBUTE_CLUSTER_DNS set, cpu parameters are filtered out and
    # only query_file varies -> two test cases.
    tp = TestCases({
        'kubedns_cpu': [100],
        'dnsmasq_cpu': [200, 300],
        'query_file': ['a', 'b'],
    })
    tc = tp.generate(set([ATTRIBUTE_CLUSTER_DNS]))
    self.assertEqual(2, len(tc))
    self.assertEqual(0, tc[0].run_subid)
    self.assertEqual(
        "[(<query_file>, 'a')]",
        str(tc[0].pv))
    self.assertEqual(
        "[(<query_file>, 'b')]",
        str(tc[1].pv))
    self.assertEqual(1, tc[1].run_subid)
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
import time
from unittest import TestCase
from mock.mock import patch, MagicMock, ANY
from only_for_platform import get_platform, not_for_platform, os_distro_value, PLATFORM_WINDOWS
from ambari_commons.os_check import OSCheck
from resource_management.core import Environment
from resource_management.core.system import System
from resource_management.libraries import PropertiesFile
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
class TestPropertiesFIleResource(TestCase):
  """
  PropertiesFile="resource_management.libraries.providers.properties_file.PropertiesFileProvider"
  Testing PropertiesFile(PropertiesFileProvider) with different 'properties dictionary'
  """

  # NOTE: @patch decorators apply bottom-up, so the LAST decorator
  # (time.asctime) maps to the FIRST mock argument of each test method.
  @patch("resource_management.core.providers.system._ensure_metadata")
  @patch("resource_management.core.sudo.create_file")
  @patch("resource_management.core.sudo.path_exists")
  @patch("resource_management.core.sudo.path_isdir")
  @patch.object(time, "asctime")
  def test_action_create_empty_properties_without_dir(self,
                                                      time_asctime_mock,
                                                      os_path_isdir_mock,
                                                      os_path_exists_mock,
                                                      create_file_mock,
                                                      ensure_mock):
    """
    Tests if 'action_create' - creates new non existent file and write proper data
    1) properties={}
    2) dir=None
    """
    os_path_isdir_mock.side_effect = [False, True]
    os_path_exists_mock.return_value = False
    time_asctime_mock.return_value = 'Today is Wednesday'
    with Environment('/') as env:
      PropertiesFile('/somewhere_in_system/one_file.properties',
                     dir=None,
                     properties={}
                     )
    # Header line carries the mocked timestamp; body is empty.
    create_file_mock.assert_called_with('/somewhere_in_system/one_file.properties', u'# Generated by Apache Ambari. Today is Wednesday\n \n ', encoding="UTF-8", on_file_created=ANY)
    ensure_mock.assert_called()

  @patch("resource_management.core.providers.system._ensure_metadata")
  @patch("resource_management.core.sudo.create_file")
  @patch("resource_management.core.sudo.path_exists")
  @patch("resource_management.core.sudo.path_isdir")
  @patch.object(time, "asctime")
  def test_action_create_empty_properties_with_dir(self,
                                                   time_asctime_mock,
                                                   os_path_isdir_mock,
                                                   os_path_exists_mock,
                                                   create_file_mock,
                                                   ensure_mock):
    """
    Tests if 'action_create' - creates new non existent file and write proper data
    1) properties={}
    2) dir='Some directory that exist '
    """
    os_path_isdir_mock.side_effect = [False, True]
    os_path_exists_mock.return_value = False
    time_asctime_mock.return_value = 'Some other day'
    with Environment('/') as env:
      PropertiesFile('file.txt',
                     dir="/dir/and/dir",
                     properties={},
                     )
    # Path must be joined from dir + filename.
    create_file_mock.assert_called_with('/dir/and/dir/file.txt', u'# Generated by Apache Ambari. Some other day\n \n ', encoding="UTF-8", on_file_created=ANY)
    ensure_mock.assert_called()

  @patch("resource_management.core.providers.system._ensure_metadata")
  @patch("resource_management.core.sudo.create_file")
  @patch("resource_management.core.sudo.path_exists")
  @patch("resource_management.core.sudo.path_isdir")
  @patch.object(time, "asctime")
  def test_action_create_properties_simple(self,
                                           time_asctime_mock,
                                           os_path_isdir_mock,
                                           os_path_exists_mock,
                                           create_file_mock,
                                           ensure_mock):
    """
    Tests if 'action_create' - creates new non existent file and write proper data
    1) properties={"Some property":"Some value"}
    2) dir=None
    """
    os_path_isdir_mock.side_effect = [False, True]
    os_path_exists_mock.return_value = False
    time_asctime_mock.return_value = 777
    with Environment('/') as env:
      PropertiesFile('/dir/new_file',
                     properties={'property1': 'value1'},
                     )
    create_file_mock.assert_called_with('/dir/new_file', u'# Generated by Apache Ambari. 777\n \nproperty1=value1\n ', encoding="UTF-8", on_file_created=ANY)
    ensure_mock.assert_called()

  @patch("resource_management.core.providers.system._ensure_metadata")
  @patch("resource_management.core.sudo.create_file")
  @patch("resource_management.core.sudo.path_exists")
  @patch("resource_management.core.sudo.path_isdir")
  @patch.object(time, "asctime")
  def test_action_create_properties_with_metacharacters(self,
                                                        time_asctime_mock,
                                                        os_path_isdir_mock,
                                                        os_path_exists_mock,
                                                        create_file_mock,
                                                        ensure_mock):
    """
    Tests if 'action_create' - creates new non existent file and write proper data
    1) properties={"":"", "Some property":"Metacharacters: -%{} ${a.a}/"}
    2) dir=None
    """
    os_path_isdir_mock.side_effect = [False, True]
    os_path_exists_mock.return_value = False
    time_asctime_mock.return_value = 777
    with Environment('/') as env:
      PropertiesFile('/dir/new_file',
                     properties={"": "",
                                 "prop.1": "'.'yyyy-MM-dd-HH",
                                 "prop.3": "%d{ISO8601} %5p %c{1}:%L - %m%n",
                                 "prop.2": "INFO, openjpa",
                                 "prop.4": "${oozie.log.dir}/oozie.log",
                                 "prop.empty": "",
                                 },
                     )
    # Properties must be emitted sorted by key, values written verbatim.
    create_file_mock.assert_called_with('/dir/new_file', u"# Generated by Apache Ambari. 777\n \n=\nprop.1='.'yyyy-MM-dd-HH\nprop.2=INFO, openjpa\nprop.3=%d{ISO8601} %5p %c{1}:%L - %m%n\nprop.4=${oozie.log.dir}/oozie.log\nprop.empty=\n ", encoding="UTF-8", on_file_created=ANY)
    ensure_mock.assert_called()

  @patch("resource_management.core.providers.system._ensure_metadata")
  @patch("resource_management.core.sudo.read_file")
  @patch("resource_management.core.sudo.create_file")
  @patch("resource_management.core.sudo.path_exists")
  @patch("resource_management.core.sudo.path_isdir")
  @patch.object(time, "asctime")
  def test_action_create_properties_rewrite_content(self,
                                                    time_asctime_mock,
                                                    os_path_isdir_mock,
                                                    os_path_exists_mock,
                                                    create_file_mock,
                                                    read_file_mock,
                                                    ensure_mock):
    """
    Tests if 'action_create' - rewrite file that exist
    1) properties={"Some property":"Some value"}
    2) dir="Some dir"
    """
    os_path_isdir_mock.side_effect = [False, True]
    os_path_exists_mock.return_value = True
    time_asctime_mock.return_value = 777
    # File exists with stale content; provider must read it, then rewrite.
    read_file_mock.return_value = 'old-content'
    with Environment('/') as env:
      PropertiesFile('new_file',
                     dir='/dir1',
                     properties={'property_1': 'value1'},
                     )
    read_file_mock.assert_called()
    create_file_mock.assert_called_with('/dir1/new_file', u'# Generated by Apache Ambari. 777\n \nproperty_1=value1\n ', encoding="UTF-8", on_file_created=ANY)
    ensure_mock.assert_called()
|
|
#!/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Broadband Platform Version of Martin Mai BBcoda2.csh
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import glob
import random
import shutil
# Import Broadband modules
import bband_utils
import stas2files
import velocity_models
from station_list import StationList
from install_cfg import InstallCfg
from bbtoolbox_cfg import BBToolboxCfg
class BBToolbox(object):
def __init__(self, i_r_scattering, i_r_velmodel, i_r_srcfile,
i_r_srffile, i_r_stations, vmodel_name, sim_id=0):
"""
This function initializes basic class objects
"""
self.sim_id = sim_id
self.r_velmodel = i_r_velmodel
self.r_srcfile = i_r_srcfile
self.r_scattering = i_r_scattering
self.r_srffile = i_r_srffile
self.r_xyz_srffile = 'xyz_' + i_r_srffile
self.r_stations = i_r_stations
self.vmodel_name = vmodel_name
self.config = None
self.iseed = None
self.fmax = None
self.kappa = None
self.q_coda = None
self.fdec = None
self.source_func = None
self.gs_flag = None
self.ngaw_flag = None
self.tr_sca = None
self.afac = None
self.bfac = None
self.str_fac = None
self.correlation_file = None
self.infcorr_flag = None
    def create_bbtoolbox_files(self, stat_file):
        """
        This function creates the files needed by bbtoolbox, including
        the scattering file (if not provided), the station file, and
        the parameter file

        Expects self.install and self.config to already be set (run()
        does this before calling here).
        """
        sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
        a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
        a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
        a_tmpdir_mod = os.path.join(self.install.A_TMP_DATA_DIR,
                                    str(self.sim_id),
                                    "bbtoolbox_%s" % (sta_base))
        a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
        a_param_outdir = os.path.join(a_outdir, "param_files")
        stat_list = StationList(stat_file)
        # Get pointer to the velocity model object
        vel_obj = velocity_models.get_velocity_model_by_name(self.vmodel_name)
        if vel_obj is None:
            raise bband_utils.ParameterError("Cannot find velocity model: %s" %
                                             (self.vmodel_name))
        vmodel_params = vel_obj.get_codebase_params('sdsu')
        # Look for the source function parameter
        if 'SOURCE_FUNC' in vmodel_params:
            self.source_func = vmodel_params['SOURCE_FUNC']
        # Look for correlation file parameter
        if "CORRELATION_FILE" in vmodel_params:
            # Set flag
            self.infcorr_flag = 1
            # Find correlation file
            self.correlation_file = os.path.join(vel_obj.base_dir,
                                                 vmodel_params['CORRELATION_FILE'])
            # Also copy file to bbtoolbox directory
            shutil.copy2(self.correlation_file,
                         os.path.join(a_tmpdir_mod,
                                      os.path.basename(self.correlation_file)))
        else:
            # Disable flag
            self.infcorr_flag = 0
            self.correlation_file = "correlation_file_not_used.txt"
        # Take care of scattering file
        if not self.r_scattering:
            # Need to create our file
            scattering_template = os.path.join(self.install.A_SDSU_DATA_DIR,
                                               "scattering_generic.dat")
            self.r_scattering = "scattering.dat"
            a_scattering = os.path.join(a_indir, self.r_scattering)
            # Velocity-model overrides for the generic scattering values.
            # Look for KAPPA
            if 'KAPPA' in vmodel_params:
                self.kappa = float(vmodel_params['KAPPA'])
            # Look for FMAX
            if 'FMAX' in vmodel_params:
                self.fmax = float(vmodel_params['FMAX'])
            if 'Q' in vmodel_params:
                self.q_coda = float(vmodel_params['Q'])
            if 'FDEC' in vmodel_params:
                self.fdec = float(vmodel_params['FDEC'])
            if 'GS_FLAG' in vmodel_params:
                self.gs_flag = float(vmodel_params['GS_FLAG'])
            if 'NGAW_FLAG' in vmodel_params:
                self.ngaw_flag = float(vmodel_params['NGAW_FLAG'])
            if 'TR_SCA' in vmodel_params:
                self.tr_sca = float(vmodel_params['TR_SCA'])
            if 'AFAC' in vmodel_params:
                self.afac = float(vmodel_params['AFAC'])
            if 'BFAC' in vmodel_params:
                self.bfac = float(vmodel_params['BFAC'])
            if 'STR_FAC' in vmodel_params:
                self.str_fac = float(vmodel_params['STR_FAC'])
            # Check if we need to calculate stress
            if 'CALCULATE_STRESS' in vmodel_params:
                # NOTE(review): float(...) == True matches only the exact
                # value 1.0 -- presumably a 0/1 flag; confirm upstream.
                if float(vmodel_params['CALCULATE_STRESS']) == True:
                    # Calculate stress based on depth of hypocenter
                    self.str_fac = self.config.calculate_stress()
            # Open template and output files
            scat_in = open(scattering_template, 'r')
            scat_out = open(a_scattering, 'w')
            # Each template line carries a "\* name - description" marker;
            # the value is written in front of the preserved marker text.
            for line in scat_in:
                if line.find(r"\* iseed - seed number for scattering") >= 0:
                    # This is the iseed line, insert the random iseed here
                    pos = line.find(r"\* iseed - seed number for scattering")
                    scat_out.write("%d %s" %
                                   (self.iseed,
                                    line[pos:]))
                elif line.find(r"\* kappa - kappa at the site") >= 0:
                    # This is the kappa line, insert self.kappa here
                    pos = line.find(r"\* kappa - kappa at the site")
                    scat_out.write("%.3f %s" %
                                   (self.kappa,
                                    line[pos:]))
                elif line.find(r"\* fmax - ") >= 0:
                    # This is the fmax line, insert self.fmax here
                    pos = line.find(r"\* fmax - ")
                    scat_out.write("%.2f %s" %
                                   (self.fmax,
                                    line[pos:]))
                elif line.find(r"\* Q - Q for the coda") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* Q - Q for the coda")
                    scat_out.write("%.1f %s" %
                                   (self.q_coda,
                                    line[pos:]))
                elif line.find(r"\* fdec - see equation") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* fdec - see equation")
                    scat_out.write("%.2f %s" %
                                   (self.fdec,
                                    line[pos:]))
                elif line.find(r"\* gs_flag - determine type") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* gs_flag - determine type")
                    scat_out.write("%d %s" %
                                   (int(self.gs_flag),
                                    line[pos:]))
                elif line.find(r"\* ngaw_flag - GMPEs") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* ngaw_flag - GMPEs")
                    scat_out.write("%d %s" %
                                   (int(self.ngaw_flag),
                                    line[pos:]))
                elif line.find(r"\* Tr_sca - scaling factor") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* Tr_sca - scaling factor")
                    scat_out.write("%.4f %s" %
                                   (self.tr_sca,
                                    line[pos:]))
                elif line.find(r"\* afac - qk factor") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* afac - qk factor")
                    scat_out.write("%.1f %s" %
                                   (self.afac,
                                    line[pos:]))
                elif line.find(r"\* bfac - qk factor") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* bfac - qk factor")
                    scat_out.write("%.1f %s" %
                                   (self.bfac,
                                    line[pos:]))
                elif line.find(r"\* str_fac - Brune stress") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* str_fac - Brune stress")
                    scat_out.write("%.2e %s" %
                                   (self.str_fac,
                                    line[pos:]))
                elif line.find(r"\* cseed - seed number") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* cseed - seed number")
                    scat_out.write("%d %s" %
                                   (self.config.SEED,
                                    line[pos:]))
                elif line.find(r"\* infcorr_flag") >= 0:
                    # This is the line, insert here
                    pos = line.find(r"\* infcorr_flag")
                    scat_out.write("%d %s" %
                                   (int(self.infcorr_flag),
                                    line[pos:]))
                else:
                    scat_out.write(line)
            # Done
            scat_in.close()
            scat_out.flush()
            scat_out.close()
            # Keep copy of scattering file in outdata
            shutil.copy2(a_scattering, os.path.join(a_param_outdir,
                                                    self.r_scattering))
        # Convert station file
        a_tmpfile = "station_%s.coords" % (sta_base)
        a_sdsu_stat_list = os.path.join(a_tmpdir_mod,
                                        "bbtstations_%s.tmp" % (sta_base))
        a_sdsu_extended_fault = os.path.join(a_indir, "extended_fault")
        param_filename = stas2files.bbp2sdsu_statlist(a_indir, stat_list,
                                                      a_sdsu_stat_list,
                                                      self.r_srffile,
                                                      self.r_xyz_srffile,
                                                      a_sdsu_extended_fault,
                                                      a_tmpfile)
        r_faultfile = os.path.basename(a_sdsu_extended_fault)
        # param_filename = stas2files.bbp2sdsu_statlist(a_indir, stat_list,
        # a_sdsu_stat_list, hypo)
        # now a_sdsu_stat_list has X Y name vs rho kappa
        # a_sdsu_stat_list.par has bbextension, bbstat, bbhypo
        # Build real station list
        self.r_stations = "bbtstations_%s.dat" % (sta_base)
        stalist_fp = open(os.path.join(a_indir, self.r_stations), 'w')
        # write headers
        stalist_fp.write("/* STATIONS FILE FOR BROAD-BAND COMPUTATION CODE " +
                         "(P.M. MAI & K.B.OLSEN) */\n")
        stalist_fp.write("/* STATIONS COORDINATES ARE IN THE X-Y SYSTEM " +
                         "REPORTED IN FIG.1 OF APPENDIX A */\n\n")
        stalist_fp.write("/* INPUT DIRECTORY */\n")
        # Create input directory and file prefix for the stations files
        file_prefix = os.path.join(a_tmpdir_mod, "%d." % (self.sim_id))
        stalist_fp.write("%s\n\n" % (file_prefix))
        stalist_fp.write("/* FILES FORMAT [RGF BIN CMP 3SF] */\n")
        stalist_fp.write("\t3SF\n\n")
        stalist_fp.write("/* FILES EXTENSION OR BINARY FILE NAME */\n")
        glob_stat = "%s/*-lf.bbp" % (a_tmpdir)
        bbp_list = glob.glob(glob_stat)
        # Now, figure out the file suffix
        if len(bbp_list) > 0:
            file_suffix = "-lf.bbp"
        else:
            file_suffix = ".bbp"
        # Write suffix
        stalist_fp.write("%s\n\n" % (file_suffix))
        # Write header for station list
        stalist_fp.write("/*\tX\tY\tNAME\tVs\tRho\tKappa */\n")
        # Now, append the station list we have in a_sdsu_stat_list
        conv_list_fp = open(a_sdsu_stat_list, 'r')
        for line in conv_list_fp:
            stalist_fp.write(line)
            # Figure out if station file path is too long
            pieces = line.split()
            st_name = pieces[2]
            total_length = len(file_prefix) + len(st_name) + len(file_suffix)
            if total_length >= bband_utils.SDSU_MAX_FILENAME:
                # Close files
                stalist_fp.close()
                conv_list_fp.close()
                raise ValueError("station path for %s " % (st_name) +
                                 " is %d characters long, maximum is %d" %
                                 (total_length, bband_utils.SDSU_MAX_FILENAME))
        # Flush all data, and close this file
        stalist_fp.flush()
        stalist_fp.close()
        # Close station file
        conv_list_fp.close()
        # Keep copy of station file in outdata
        shutil.copy2(os.path.join(a_indir, self.r_stations),
                     os.path.join(a_param_outdir, self.r_stations))
        # Read param file
        conv_par_fp = open(param_filename, 'r')
        conv_par_data = conv_par_fp.readlines()
        conv_par_fp.close()
        # 2nd line is hypo coordinates
        hypo_line = conv_par_data[1].split(':')[1]
        hypo_coords = []
        for i in range(0, 3):
            hypo_coords.append(hypo_line.split()[i])
        # 1st line carries the minimum bounding-box dimensions.
        min_box_dims = []
        min_box_line = conv_par_data[0].split(':')[1]
        for i in range(0, 2):
            min_box_dims.append(float(min_box_line.split()[i]))
        # FS: Feb-2013: Get magnitude directly from SRC file
        # FS: Mar-2013: We use this magnitude only when we don't have
        # a SRC file
        # get magnitude from 3rd line
        magnitude = float(conv_par_data[2].split(':')[1])
        self.r_bbparfile = "%d_%s.bbpar" % (self.sim_id, sta_base)
        parfile_name = os.path.join(a_indir, self.r_bbparfile)
        parfile_fp = open(parfile_name, 'w')
        parfile_fp.write("/* MODALITY FLAG: [0] LF-HF MERGING, " +
                         "[1] LF-SCATTERING, [2] LF-ISOCHRONE */\n")
        parfile_fp.write(" %d\n" % (self.config.MODALITY))
        parfile_fp.write("/* OUTPUT DIRECTORY */\n")
        parfile_fp.write('"%s"\n' % a_tmpdir_mod)
        parfile_fp.write('/* VELOCITY MODEL FILE (3D MODEL OR 1D MODEL) */\n')
        parfile_fp.write('"%s"\n' %
                         (os.path.join(a_indir, self.r_velmodel)))
        parfile_fp.write("/* STATIONS FILE REPORTING [X-Y] COORDINATES, " +
                         "FILENAMES AND PARAMETERS */\n")
        parfile_fp.write('"%s"\n' %
                         (os.path.join(a_indir, self.r_stations)))
        parfile_fp.write("/* OPTIONAL 2ND STATIONS FILE REPORTING ONLY " +
                         "FILENAMES - ONLY FOR MODALITY = 0 */\n")
        parfile_fp.write("2ndstations.dat\n")
        parfile_fp.write("/* FAULT MODEL TYPE: [POINT], " +
                         "[EXTENDED FAULT-MODEL FILE] */\n")
        parfile_fp.write(' extended "%s"\n' %
                         (os.path.join(a_indir, r_faultfile)))
        # parfile_fp.write(' point\n')
        parfile_fp.write("/* HYPOCENTER COORDINATES [X-Y-Z] IN KM */\n")
        parfile_fp.write("%.2f %.2f %.2f\n" % (float(hypo_coords[0]),
                                               float(hypo_coords[1]),
                                               float(hypo_coords[2])))
        parfile_fp.write('/* GRID DEFINITION [X-Y-Z] FOR RAYTRACING: ' +
                         '"NEAR-SIDE", GRID-SPACING (IN KM) */\n')
        parfile_fp.write("0.0 0.0 0.0 1.0\n")
        parfile_fp.write('/* GRID DEFINITION [X-Y-Z] FOR RAYTRACING: ' +
                         '"FAR-SIDE" (IN KM) */\n')
        # Use config grid if given, otherwise pad the minimum box by 20 km.
        if self.config.grid_x is not None and self.config.grid_y is not None:
            parfile_fp.write("%.1f %.1f %.1f\n" %
                             (self.config.grid_x,
                              self.config.grid_y,
                              self.config.grid_z))
        else:
            parfile_fp.write("%.1f %.1f %.1f\n" %
                             (round(min_box_dims[0] + 20.0, 0),
                              round(min_box_dims[1] + 20.0, 0),
                              self.config.grid_z))
        parfile_fp.write("/* SCATTERING PARAMETERS FILE */\n")
        parfile_fp.write('"%s"\n' %
                         (os.path.join(a_indir, self.r_scattering)))
        parfile_fp.write("/* EVENT MAGNITUDE */\n")
        # Magnitude from the converted station list is a fallback, used
        # only when the config (SRC file) does not provide one.
        if self.config.MAG is None:
            parfile_fp.write("%.2f\n" % (magnitude))
        else:
            parfile_fp.write("%.2f\n" % (self.config.MAG))
        parfile_fp.write("/* DOMINANT SOURCE MECHANISM [SS RS NS AL] */\n")
        parfile_fp.write("%s\n" % conv_par_data[3].split(":")[1].strip())
        parfile_fp.write("/* SOURCE TIME FUNCTION "
                         "[TRI BOX YOF DREG LIU USER-DEF] */\n")
        parfile_fp.write("%s\n" % (self.source_func))
        parfile_fp.write("/* VERBOSE MODE [ON OFF] */\n")
        parfile_fp.write("off\n")
        parfile_fp.write("/* SRF FILE */\n")
        parfile_fp.write('"%s"\n' %
                         (os.path.join(a_indir, self.r_xyz_srffile)))
        parfile_fp.write("/* CORRELATION FILE */\n")
        parfile_fp.write("%s\n" %
                         (os.path.basename(self.correlation_file)))
        parfile_fp.write("/* RAKE */\n")
        parfile_fp.write("%.2f\n" % (self.config.RAKE))
        parfile_fp.flush()
        parfile_fp.close()
        # Keep a copy in the outdata directory
        shutil.copy2(parfile_name, os.path.join(a_param_outdir,
                                                self.r_bbparfile))
    def run(self):
        """
        Prepare the BBToolbox parameter file and invoke the SDSU
        BBtoolbox.exe binary.

        Reads configuration from InstallCfg/BBToolboxCfg, stages the
        low-frequency seismograms into a per-station-list temporary
        directory, runs the external executable, and copies the hybrid
        broadband results back into the shared tmp directory.
        Raises ValueError if the station list contains no stations, and
        aborts (via bband_utils.runprog) if the executable fails.
        """
        print("SDSU BBToolBox".center(80, '-'))
        self.install = InstallCfg.getInstance()
        install = self.install
        sim_id = self.sim_id
        # Build path names
        # sta_base distinguishes runs that use different station lists.
        sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
        self.log = os.path.join(install.A_OUT_LOG_DIR,
                                str(sim_id),
                                "%d.bbtoolbox_%s.log" % (sim_id, sta_base))
        a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
        a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
        a_tmpdir_mod = os.path.join(install.A_TMP_DATA_DIR,
                                    str(sim_id),
                                    "bbtoolbox_%s" % (sta_base))
        a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
        a_param_outdir = os.path.join(a_outdir, "param_files")
        #
        # Make sure the output and two tmp directories exist
        #
        bband_utils.mkdirs([a_tmpdir, a_tmpdir_mod, a_outdir, a_param_outdir],
                           print_cmd=False)
        # Make sure BBToolbox works when starting from a srf file
        # (in that case there is no source file and a_srcfile stays empty)
        if self.r_srcfile:
            a_srcfile = os.path.join(a_indir, self.r_srcfile)
        else:
            a_srcfile = ""
        self.config = BBToolboxCfg(a_srcfile=a_srcfile)
        # Set default parameters
        config = self.config
        # Initialize random number with seed and calculate new iseed
        random.seed(config.SEED)
        self.iseed = int(random.random() * 10000)
        self.fmax = config.FMAX
        self.kappa = config.KAPPA
        self.q_coda = config.Q_CODA
        self.fdec = config.FDEC
        self.source_func = config.SOURCE_FUNC
        self.gs_flag = config.GS_FLAG
        self.ngaw_flag = config.NGAW_FLAG
        self.tr_sca = config.TR_SCA
        self.afac = config.AFAC
        self.bfac = config.BFAC
        self.str_fac = config.STR_FAC
        # Write valid par file, which includes correct references to
        # output dir, velocity model, stations list, fault
        # description, and scattering
        a_stations = os.path.join(a_indir, self.r_stations)
        # Need to create SDSU's BBToolbox files first
        self.create_bbtoolbox_files(a_stations)
        # create_bbtoolbox_files writes a converted station list; use it
        # from here on instead of the original one.
        a_stations = os.path.join(a_indir,
                                  "bbtstations_%s.dat" %
                                  (sta_base))
        # BBtoolbox.exe reads the name of its par file from stdin, so we
        # stage that name into a one-line file to redirect from.
        parfilename = os.path.join(a_tmpdir_mod, "parfilename_%s" % (sta_base))
        filename_fp = open(parfilename, "w")
        filename_fp.write('"%s"\n' % (os.path.join(a_indir, self.r_bbparfile)))
        filename_fp.flush()
        filename_fp.close()
        # Get list of stations
        stat_file_fp = open(a_stations, "r")
        data = stat_file_fp.readlines()
        stat_file_fp.close()
        # Scan for the header row (second column == "X"); station rows
        # follow it.
        for i in range(0, len(data)):
            pieces = data[i].split()
            if len(pieces) > 1:
                if pieces[1] == "X":
                    break
        # NOTE(review): if no "X" header row exists, `i` is simply the
        # last line's index and the loop below yields no stations,
        # triggering the ValueError -- TODO confirm this is intended.
        stat_names = []
        for j in range(i + 1, len(data)):
            pieces = data[j].split()
            stat_names.append(pieces[2])
        # Check if we have stations in our list
        if len(stat_names) == 0:
            # No stations! We should output an error
            raise ValueError("No stations in the station list!")
        # Stagein seismograms to working dir
        for i in range(0, len(stat_names)):
            glob_list = glob.glob("%s/%d.%s*" % (a_tmpdir, sim_id,
                                                 stat_names[i]))
            for seismogram_file in glob_list:
                basename = os.path.basename(seismogram_file)
                shutil.copy2(seismogram_file, os.path.join(a_tmpdir_mod,
                                                           basename))
        # Run in tmpdir subdir to isolate temp fortran files
        os.chdir(a_tmpdir_mod)
        cmd = "%s/BBtoolbox.exe < %s >> %s 2>&1" % (install.A_SDSU_BIN_DIR,
                                                    parfilename, self.log)
        bband_utils.runprog(cmd, abort_on_error=True)
        # Copy hybrid results back to the shared tmp dir as .bbp files.
        for i in range(0, len(stat_names)):
            shutil.copy2("%s/BB.%s.hyb" % (a_tmpdir_mod, stat_names[i]),
                         "%s/%d.%s.bbp" % (a_tmpdir, sim_id, stat_names[i]))
        if config.copy_lf_seismograms:
            a_param_outdir = os.path.join(a_outdir, "param_files")
            for i in range(0, len(stat_names)):
                # Keep copy of lf seismogram files in outdata
                shutil.copy2("%s/%s.%s-lf.bbp" % (a_tmpdir, sim_id, stat_names[i]),
                             "%s/%s.%s-lf.bbp" % (a_param_outdir, sim_id, stat_names[i]))
        # Change to tmpdir so run.log file is put in tmpdir
        os.chdir(a_tmpdir)
        print("SDSU BBToolBox Completed".center(80, '-'))
if __name__ == "__main__":
    # Stand-alone smoke test: build a BBToolbox module from the command
    # line arguments and run it directly.
    ARGS = sys.argv
    print("Testing Module: %s" % (os.path.basename(ARGS[0])))
    ME = BBToolbox(ARGS[1], ARGS[2], ARGS[3], ARGS[4], ARGS[5],
                   None, sim_id=int(ARGS[6]))
    ME.run()
|
|
from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.locations import build_prefix, virtualenv_no_global, distutils_scheme
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
logger = logging.getLogger(__name__)
class InstallCommand(Command):
    """
    Install packages from:
    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.
    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    name = 'install'
    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Install packages.'
    def __init__(self, *args, **kw):
        """Register all command-line options for ``pip install``.

        Options are split between command-specific options (cmd_opts)
        and the shared package-index option group; both are inserted at
        the front of the parser.
        """
        super(InstallCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        # Shared options reused from the cmdoptions registry.
        cmd_opts.add_option(cmdoptions.editable.make())
        cmd_opts.add_option(cmdoptions.requirements.make())
        cmd_opts.add_option(cmdoptions.build_dir.make())
        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
                 'By default this will not replace existing files/folders in '
                 '<dir>. Use --upgrade to replace existing packages in <dir> '
                 'with new versions.'
        )
        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )
        cmd_opts.add_option(cmdoptions.download_cache.make())
        cmd_opts.add_option(cmdoptions.src.make())
        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
                 'version. This process is recursive regardless of whether '
                 'a dependency is already satisfied.'
        )
        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')
        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')
        cmd_opts.add_option(cmdoptions.no_deps.make())
        # --no-install / --no-download are deprecated; run() emits a
        # RemovedInPip7Warning when either is used.
        cmd_opts.add_option(
            '--no-install',
            dest='no_install',
            action='store_true',
            help="DEPRECATED. Download and unpack all packages, but don't "
                 "actually install them."
        )
        cmd_opts.add_option(
            '--no-download',
            dest='no_download',
            action="store_true",
            help="DEPRECATED. Don't download any packages, just install the "
                 "ones already downloaded (completes an install run with "
                 "--no-install).")
        cmd_opts.add_option(cmdoptions.install_options.make())
        cmd_opts.add_option(cmdoptions.global_options.make())
        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help='Install using the user scheme.')
        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")
        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")
        # --compile/--no-compile share one dest; compile defaults to True.
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )
        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )
        cmd_opts.add_option(cmdoptions.use_wheel.make())
        cmd_opts.add_option(cmdoptions.no_use_wheel.make())
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, "
                 "pip only finds stable versions.")
        cmd_opts.add_option(cmdoptions.no_clean.make())
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this install command.
        This method is meant to be overridden by subclasses, not
        called directly.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            use_wheel=options.use_wheel,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
    def run(self, options, args):
        """Resolve, download and install the requested requirements.

        Mutates ``options`` in place to normalize deprecated flags, then
        builds a RequirementSet from positional args, editables and
        requirements files, installs (or just downloads) it, and finally
        handles the --target relocation.
        Returns the RequirementSet on success, or None when no
        requirements were given.
        """
        # Normalize deprecated flag combinations first.
        if (
            options.no_install or
            options.no_download
        ):
            warnings.warn(
                "--no-install and --no-download are deprecated. "
                "See https://github.com/pypa/pip/issues/906.",
                RemovedInPip7Warning,
            )
        # --download implies download-only plus ignoring installed dists.
        if options.download_dir:
            options.no_install = True
            options.ignore_installed = True
        # If we have --no-install or --no-download and no --build we use the
        # legacy static build dir
        if (options.build_dir is None
                and (options.no_install or options.no_download)):
            options.build_dir = build_prefix
        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)
        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
        # --target installs into a scratch dir first, then relocates the
        # purelib contents into target_dir at the end of this method.
        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir)
                    and not os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)
        global_options = options.global_options or []
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        if options.use_mirrors:
            warnings.warn(
                "--use-mirrors has been deprecated and will be removed in the "
                "future. Explicit uses of --index-url and/or --extra-index-url"
                " is suggested.",
                RemovedInPip7Warning,
            )
        if options.mirrors:
            warnings.warn(
                "--mirrors has been deprecated and will be removed in the "
                "future. Explicit uses of --index-url and/or --extra-index-url"
                " is suggested.",
                RemovedInPip7Warning,
            )
            index_urls += options.mirrors
        if options.download_cache:
            warnings.warn(
                "--download-cache has been deprecated and will be removed in "
                "the future. Pip now automatically uses and configures its "
                "cache.",
                RemovedInPip8Warning,
            )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            # Only auto-delete the build dir when neither --no-clean nor an
            # explicit --build dir was given.
            build_delete = (not (options.no_clean or options.build_dir))
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                )
                # Collect requirements from positional args, -e, and -r.
                for name in args:
                    requirement_set.add_requirement(
                        InstallRequirement.from_line(
                            name, None, isolated=options.isolated_mode,
                        )
                    )
                for name in options.editables:
                    requirement_set.add_requirement(
                        InstallRequirement.from_editable(
                            name,
                            default_vcs=options.default_vcs,
                            isolated=options.isolated_mode,
                        )
                    )
                for filename in options.requirements:
                    for req in parse_requirements(
                            filename,
                            finder=finder, options=options, session=session):
                        requirement_set.add_requirement(req)
                if not requirement_set.has_requirements:
                    opts = {'name': self.name}
                    if options.find_links:
                        msg = ('You must give at least one requirement to '
                               '%(name)s (maybe you meant "pip %(name)s '
                               '%(links)s"?)' %
                               dict(opts, links=' '.join(options.find_links)))
                    else:
                        msg = ('You must give at least one requirement '
                               'to %(name)s (see "pip help %(name)s")' % opts)
                    logger.warning(msg)
                    return
                try:
                    if not options.no_download:
                        requirement_set.prepare_files(finder)
                    else:
                        requirement_set.locate_files()
                    if not options.no_install:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                        )
                        # Report what was installed, with versions where
                        # available.
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                if hasattr(req, 'installed_version'):
                                    if req.installed_version:
                                        item += '-' + req.installed_version
                            except Exception:
                                # Best-effort version lookup; never let
                                # reporting break the install.
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    # Keep the pre-existing build dir for inspection.
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if ((not options.no_clean)
                            and ((not options.no_install)
                                 or options.download_dir)):
                        requirement_set.cleanup_files()
        # Relocate the scratch --home install into the real --target dir.
        if options.target_dir:
            if not os.path.exists(options.target_dir):
                os.makedirs(options.target_dir)
            lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            for item in os.listdir(lib_dir):
                target_item_dir = os.path.join(options.target_dir, item)
                if os.path.exists(target_item_dir):
                    if not options.upgrade:
                        logger.warning(
                            'Target directory %s already exists. Specify '
                            '--upgrade to force replacement.',
                            target_item_dir
                        )
                        continue
                    if os.path.islink(target_item_dir):
                        logger.warning(
                            'Target directory %s already exists and is '
                            'a link. Pip will not automatically replace '
                            'links, please remove if replacement is '
                            'desired.',
                            target_item_dir
                        )
                        continue
                    if os.path.isdir(target_item_dir):
                        shutil.rmtree(target_item_dir)
                    else:
                        os.remove(target_item_dir)
                shutil.move(
                    os.path.join(lib_dir, item),
                    target_item_dir
                )
            shutil.rmtree(temp_target_dir)
        return requirement_set
|
|
import logging
import os
import typing
from dvc.exceptions import OutputNotFoundError
from dvc.utils import relpath
from ..progress import DEFAULT_CALLBACK
from ._metadata import Metadata
from .base import FileSystem
if typing.TYPE_CHECKING:
from dvc.output import Output
from dvc.types import AnyPath
logger = logging.getLogger(__name__)
class DvcFileSystem(FileSystem):  # pylint:disable=abstract-method
    """DVC repo fs.
    Args:
        repo: DVC repo.
    """
    # Path separator and scheme advertised by this filesystem.
    sep = os.sep
    scheme = "local"
    # Hash name used for file checksums.
    PARAM_CHECKSUM = "md5"
    def __init__(self, **kwargs):
        # The DVC repo is required and passed via kwargs.
        super().__init__(**kwargs)
        self.repo = kwargs["repo"]
    @property
    def config(self):
        # This filesystem carries no fs-level config of its own.
        raise NotImplementedError
    def _find_outs(self, path, *args, **kwargs):
        """Return the cached outputs that match ``path``.

        Raises OutputNotFoundError when no cached output matches.
        """
        outs = self.repo.find_outs_by_path(path, *args, **kwargs)
        def _is_cached(out):
            return out.use_cache
        # Only cache-backed outputs are visible through this filesystem.
        outs = list(filter(_is_cached, outs))
        if not outs:
            raise OutputNotFoundError(path, self.repo)
        return outs
    def _get_granular_hash(self, path: "AnyPath", out: "Output", remote=None):
        """Return the hash of the file at ``path`` inside directory
        output ``out``, loading the dir cache if needed.

        Raises FileNotFoundError when the entry is not in the dir cache.
        """
        # NOTE: use string paths here for performance reasons
        key = tuple(relpath(path, out.fs_path).split(os.sep))
        out.get_dir_cache(remote=remote)
        if out.obj is None:
            raise FileNotFoundError
        (_, oid) = out.obj.trie.get(key) or (None, None)
        if oid:
            return oid
        raise FileNotFoundError
    def _get_fs_path(self, path: "AnyPath", remote=None):
        """Map a repo path to an (fs, path) pair pointing at the cached
        file, falling back to the remote ODB when the local cache is
        missing or stale.

        Raises FileNotFoundError for unknown paths and IsADirectoryError
        when ``path`` names a directory rather than a file.
        """
        try:
            outs = self._find_outs(path, strict=False)
        except OutputNotFoundError as exc:
            raise FileNotFoundError from exc
        if len(outs) != 1 or (
            outs[0].is_dir_checksum and path == outs[0].fs_path
        ):
            raise IsADirectoryError
        out = outs[0]
        if not out.hash_info:
            raise FileNotFoundError
        if out.changed_cache(filter_info=path):
            from dvc.config import NoRemoteError
            try:
                remote_odb = self.repo.cloud.get_remote_odb(remote)
            except NoRemoteError as exc:
                raise FileNotFoundError from exc
            if out.is_dir_checksum:
                checksum = self._get_granular_hash(path, out).value
            else:
                checksum = out.hash_info.value
            remote_fs_path = remote_odb.hash_to_path(checksum)
            return remote_odb.fs, remote_fs_path
        if out.is_dir_checksum:
            checksum = self._get_granular_hash(path, out).value
            cache_path = out.odb.fs.unstrip_protocol(
                out.odb.hash_to_path(checksum)
            )
        else:
            cache_path = out.cache_path
        return out.odb.fs, cache_path
    def open(  # type: ignore
        self, path: str, mode="r", encoding=None, **kwargs
    ):  # pylint: disable=arguments-renamed
        """Open the cached file backing ``path``."""
        fs, fspath = self._get_fs_path(path, **kwargs)
        return fs.open(fspath, mode=mode, encoding=encoding)
    def exists(self, path):  # pylint: disable=arguments-renamed
        """Return True when ``path`` resolves to a tracked entry."""
        try:
            self.metadata(path)
            return True
        except FileNotFoundError:
            return False
    def isdir(self, path):  # pylint: disable=arguments-renamed
        """Return True when ``path`` is a directory (False on missing)."""
        try:
            meta = self.metadata(path)
            return meta.isdir
        except FileNotFoundError:
            return False
    def check_isdir(self, path, outs):
        """Heuristically decide whether ``path`` is a directory given the
        outputs that matched it.
        """
        if len(outs) != 1:
            return True
        out = outs[0]
        if not out.is_dir_checksum:
            # A non-directory output is a dir only if path is above it.
            return out.fs_path != path
        if out.fs_path == path:
            return True
        try:
            # If path resolves to a file inside the dir cache, it is a file.
            self._get_granular_hash(path, out)
            return False
        except FileNotFoundError:
            return True
    def isfile(self, path):  # pylint: disable=arguments-renamed
        """Return True when ``path`` is a regular file (False on missing)."""
        try:
            meta = self.metadata(path)
            return meta.isfile
        except FileNotFoundError:
            return False
    def _fetch_dir(self, out, **kwargs):
        # pull dir cache if needed
        out.get_dir_cache(**kwargs)
        if not out.obj:
            raise FileNotFoundError
    def _add_dir(self, trie, out, **kwargs):
        """Expand directory output ``out`` into ``trie`` entries."""
        self._fetch_dir(out, **kwargs)
        base = out.fs.path.parts(out.fs_path)
        for key, _, _ in out.obj:  # noqa: B301
            trie[base + key] = None
    def _walk(self, root, trie, topdown=True, **kwargs):
        """Yield (root, dirs, files) for ``root`` and recurse top-down."""
        dirs = set()
        files = []
        root_parts = self.path.parts(root)
        out = trie.get(root_parts)
        if out and out.is_dir_checksum:
            self._add_dir(trie, out, **kwargs)
        root_len = len(root_parts)
        try:
            for key, out in trie.iteritems(prefix=root_parts):  # noqa: B301
                if key == root_parts:
                    continue
                name = key[root_len]
                # Deeper keys (or dir outputs) imply an intermediate dir.
                if len(key) > root_len + 1 or (out and out.is_dir_checksum):
                    dirs.add(name)
                    continue
                files.append(name)
        except KeyError:
            pass
        assert topdown
        dirs = list(dirs)
        yield root, dirs, files
        for dname in dirs:
            yield from self._walk(self.path.join(root, dname), trie)
    def walk(self, top, topdown=True, onerror=None, **kwargs):
        """os.walk-style traversal over tracked outputs under ``top``.

        Errors (missing path, not a directory) are reported through
        ``onerror`` when given; only top-down order is supported.
        """
        from pygtrie import Trie
        assert topdown
        root = os.path.abspath(top)
        try:
            meta = self.metadata(root)
        except FileNotFoundError:
            if onerror is not None:
                onerror(FileNotFoundError(top))
            return
        if not meta.isdir:
            if onerror is not None:
                onerror(NotADirectoryError(top))
            return
        trie = Trie()
        for out in meta.outs:
            trie[out.fs.path.parts(out.fs_path)] = out
            if out.is_dir_checksum and self.path.isin_or_eq(root, out.fs_path):
                self._add_dir(trie, out, **kwargs)
        yield from self._walk(root, trie, topdown=topdown, **kwargs)
    def find(self, path, prefix=None):
        """Yield every file path under ``path``.

        ``prefix`` is accepted for interface compatibility but unused
        here -- TODO confirm against the base class contract.
        """
        for root, _, files in self.walk(path):
            for fname in files:
                # NOTE: os.path.join is ~5.5 times slower
                yield f"{root}{os.sep}{fname}"
    def isdvc(self, path, recursive=False, strict=True):
        """Return True when ``path`` is (or, non-strictly, contains) a
        DVC-tracked output.
        """
        try:
            meta = self.metadata(path)
        except FileNotFoundError:
            return False
        recurse = recursive or not strict
        return meta.output_exists if recurse else meta.is_output
    def isexec(self, path):  # pylint: disable=unused-argument
        # Cached files are never reported as executable.
        return False
    def metadata(self, fs_path):
        """Return Metadata for ``fs_path``; raises FileNotFoundError when
        no tracked output matches.
        """
        abspath = os.path.abspath(fs_path)
        try:
            outs = self._find_outs(abspath, strict=False, recursive=True)
        except OutputNotFoundError as exc:
            raise FileNotFoundError from exc
        meta = Metadata(fs_path=abspath, outs=outs, repo=self.repo)
        meta.isdir = meta.isdir or self.check_isdir(meta.fs_path, meta.outs)
        return meta
    def info(self, path):
        """Return an fsspec-style info dict: type, size, and hash when
        known.
        """
        meta = self.metadata(path)
        ret = {"type": "directory" if meta.isdir else "file"}
        if meta.is_output and len(meta.outs) == 1 and meta.outs[0].hash_info:
            out = meta.outs[0]
            ret["size"] = out.meta.size
            ret[out.hash_info.name] = out.hash_info.value
        elif meta.part_of_output:
            (out,) = meta.outs
            key = self.path.parts(self.path.relpath(path, out.fs_path))
            (obj_meta, oid) = out.obj.trie.get(key) or (None, None)
            if oid:
                ret["size"] = obj_meta.size if obj_meta else 0
                ret[oid.name] = oid.value
        return ret
    def get_file(
        self, from_info, to_file, callback=DEFAULT_CALLBACK, **kwargs
    ):
        """Copy the cached file backing ``from_info`` to local ``to_file``."""
        fs, path = self._get_fs_path(from_info)
        fs.get_file(  # pylint: disable=protected-access
            path, to_file, callback=callback, **kwargs
        )
    def checksum(self, path):
        """Return the md5 of ``path``; only md5-hashed entries supported."""
        info = self.info(path)
        md5 = info.get("md5")
        if md5:
            return md5
        raise NotImplementedError
|
|
# -*- test-case-name: twisted.python.test.test_components -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Component architecture for Twisted, based on Zope3 components.
Using the Zope3 API directly is strongly recommended. Everything
you need is in the top-level of the zope.interface package, e.g.::
from zope.interface import Interface, implements
class IFoo(Interface):
pass
class Foo:
implements(IFoo)
print IFoo.implementedBy(Foo) # True
print IFoo.providedBy(Foo()) # True
L{twisted.python.components.registerAdapter} from this module may be used to
add to Twisted's global adapter registry.
L{twisted.python.components.proxyForInterface} is a factory for classes
which allow access to only the parts of another class defined by a specified
interface.
"""
# zope3 imports
from zope.interface import interface, declarations
from zope.interface.adapter import AdapterRegistry
# twisted imports
from twisted.python import reflect
from twisted.persisted import styles
# Twisted's global adapter registry; consulted by registerAdapter() and
# by the adapter hook installed below.
globalRegistry = AdapterRegistry()
# When true, registerAdapter() silently replaces an existing registration
# instead of raising ValueError. Is this supposed to be public?
ALLOW_DUPLICATES = 0
# Define a function to find the registered adapter factory, using either a
# version of Zope Interface which has the `registered' method or an older
# version which does not.
# Pick an implementation of _registered matching the installed
# zope.interface: newer versions expose AdapterRegistry.registered(),
# older ones require poking at the registry internals.
if getattr(AdapterRegistry, 'registered', None) is None:
    def _registered(registry, required, provided):
        """
        Return the adapter factory for the given parameters in the given
        registry, or None if there is not one.
        """
        byInterface = registry.get(required).selfImplied
        return byInterface.get(provided, {}).get('')
else:
    def _registered(registry, required, provided):
        """
        Return the adapter factory for the given parameters in the given
        registry, or None if there is not one.
        """
        return registry.registered([required], provided)
def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
    """Register an adapter class.
    An adapter class is expected to implement the given interface, by
    adapting instances implementing 'origInterface'. An adapter class's
    __init__ method should accept one parameter, an instance implementing
    'origInterface'.
    """
    registry = globalRegistry
    assert interfaceClasses, "You need to pass an Interface"
    # deal with class->interface adapters:
    if not isinstance(origInterface, interface.InterfaceClass):
        origInterface = declarations.implementedBy(origInterface)
    # Check every target interface for duplicates before registering
    # anything, so a ValueError leaves the registry untouched.
    for interfaceClass in interfaceClasses:
        previous = _registered(registry, origInterface, interfaceClass)
        if previous is not None and not ALLOW_DUPLICATES:
            raise ValueError("an adapter (%s) was already registered." % (previous, ))
    for interfaceClass in interfaceClasses:
        registry.register([origInterface], interfaceClass, '', adapterFactory)
def getAdapterFactory(fromInterface, toInterface, default):
    """Return registered adapter for a given class and interface.
    Note that is tied to the *Twisted* global registry, and will
    thus not find adapters registered elsewhere.
    """
    if not isinstance(fromInterface, interface.InterfaceClass):
        fromInterface = declarations.implementedBy(fromInterface)
    factory = globalRegistry.lookup1(fromInterface, toInterface)
    return default if factory is None else factory
def _addHook(registry):
    """
    Add an adapter hook which will attempt to look up adapters in the given
    registry.
    @type registry: L{zope.interface.adapter.AdapterRegistry}
    @return: The hook which was added, for later use with L{_removeHook}.
    """
    # Bind the lookup method once; the hook runs on every adaptation.
    lookup = registry.lookup1
    def _hook(iface, ob):
        factory = lookup(declarations.providedBy(ob), iface)
        return None if factory is None else factory(ob)
    interface.adapter_hooks.append(_hook)
    return _hook
def _removeHook(hook):
    """
    Remove a previously added adapter hook.
    @param hook: An object previously returned by a call to L{_addHook}. This
        will be removed from the list of adapter hooks.
    @raise ValueError: if the hook is not currently installed.
    """
    interface.adapter_hooks.remove(hook)
# Install the lookup hook for the global registry at import time, so
# IFoo(obj)-style adaptation consults registerAdapter() registrations.
_addHook(globalRegistry)
def getRegistry():
    """Returns the Twisted global
    C{zope.interface.adapter.AdapterRegistry} instance.

    All adapters registered via L{registerAdapter} live in this registry.
    """
    return globalRegistry
# FIXME: deprecate attribute somehow?
# Backwards-compatible alias: adaptation failures surface as TypeError.
CannotAdapt = TypeError
class Adapter:
    """Default base class for adapters over some interface.

    Stores the adapted object on C{self.original} and forwards the
    optional C{__conform__}/C{isuper} protocol calls to it.

    @cvar temporaryAdapter: If this is True, the adapter will not be
        persisted on the Componentized.
    @cvar multiComponent: If this adapter is persistent, should it be
        automatically registered for all appropriate interfaces.
    """
    # Flags consulted by Componentized when caching adapters.
    temporaryAdapter = 0
    multiComponent = 1
    def __init__(self, original):
        """Remember the object being adapted as C{self.original}."""
        self.original = original
    def __conform__(self, interface):
        """
        Delegate conformance checks to the wrapped object; return None
        when it does not take part in the __conform__ protocol.
        """
        conform = getattr(self.original, "__conform__", None)
        if conform is None:
            return None
        return conform(interface)
    def isuper(self, iface, adapter):
        """
        Forward isuper to the wrapped object.
        """
        return self.original.isuper(iface, adapter)
class Componentized(styles.Versioned):
    """I am a mixin to allow you to be adapted in various ways persistently.
    I define a list of persistent adapters. This is to allow adapter classes
    to store system-specific state, and initialized on demand. The
    getComponent method implements this. You must also register adapters for
    this class for the interfaces that you wish to pass to getComponent.
    Many other classes and utilities listed here are present in Zope3; this one
    is specific to Twisted.
    """
    persistenceVersion = 1
    def __init__(self):
        # Maps fully-qualified interface names to cached adapter instances.
        self._adapterCache = {}
    def locateAdapterClass(self, klass, interfaceClass, default):
        """Return the adapter factory registered for adapting C{klass} to
        C{interfaceClass}, or C{default} when none is registered.
        """
        return getAdapterFactory(klass, interfaceClass, default)
    def setAdapter(self, interfaceClass, adapterClass):
        """Instantiate C{adapterClass} around myself and cache the result
        as my component for C{interfaceClass}.
        """
        self.setComponent(interfaceClass, adapterClass(self))
    def addAdapter(self, adapterClass, ignoreClass=0):
        """Utility method that calls addComponent. I take an adapter class and
        instantiate it with myself as the first argument.
        @return: The adapter instantiated.
        """
        adapt = adapterClass(self)
        self.addComponent(adapt, ignoreClass)
        return adapt
    def setComponent(self, interfaceClass, component):
        """Cache C{component} as my adapter for C{interfaceClass}."""
        self._adapterCache[reflect.qual(interfaceClass)] = component
    def addComponent(self, component, ignoreClass=0):
        """
        Add a component to me, for all appropriate interfaces.
        In order to determine which interfaces are appropriate, the component's
        provided interfaces will be scanned.
        If the argument 'ignoreClass' is True, then all interfaces are
        considered appropriate.
        Otherwise, an 'appropriate' interface is one for which its class has
        been registered as an adapter for my class according to the rules of
        getComponent.
        NOTE(review): despite the historical docstring, this method does
        not return anything; callers relying on None-return are preserved.
        """
        for iface in declarations.providedBy(component):
            if (ignoreClass or
                (self.locateAdapterClass(self.__class__, iface, None)
                 == component.__class__)):
                self._adapterCache[reflect.qual(iface)] = component
    def unsetComponent(self, interfaceClass):
        """Remove my component specified by the given interface class."""
        del self._adapterCache[reflect.qual(interfaceClass)]
    def removeComponent(self, component):
        """
        Remove the given component from me entirely, for all interfaces for which
        it has been registered.
        @return: a list of the interfaces that were removed.
        """
        l = []
        # Iterate over a snapshot: deleting from the dict while iterating
        # its live items() view raises RuntimeError on Python 3 (on
        # Python 2, items() already returned a list, so this is a no-op).
        for k, v in list(self._adapterCache.items()):
            if v is component:
                del self._adapterCache[k]
                l.append(reflect.namedObject(k))
        return l
    def getComponent(self, interface, default=None):
        """Create or retrieve an adapter for the given interface.
        If such an adapter has already been created, retrieve it from the cache
        that this instance keeps of all its adapters. Adapters created through
        this mechanism may safely store system-specific state.
        If you want to register an adapter that will be created through
        getComponent, but you don't require (or don't want) your adapter to be
        cached and kept alive for the lifetime of this Componentized object,
        set the attribute 'temporaryAdapter' to True on your adapter class.
        If you want to automatically register an adapter for all appropriate
        interfaces (with addComponent), set the attribute 'multiComponent' to
        True on your adapter class.
        """
        k = reflect.qual(interface)
        if k in self._adapterCache:
            return self._adapterCache[k]
        else:
            # Fall back to zope.interface's adaptation protocol.
            adapter = interface.__adapt__(self)
            if adapter is not None and not (
                hasattr(adapter, "temporaryAdapter") and
                adapter.temporaryAdapter):
                self._adapterCache[k] = adapter
                if (hasattr(adapter, "multiComponent") and
                    adapter.multiComponent):
                    self.addComponent(adapter)
            if adapter is None:
                return default
            return adapter
    def __conform__(self, interface):
        # Participate in the __conform__ protocol via getComponent.
        return self.getComponent(interface)
class ReprableComponentized(Componentized):
    # A Componentized whose repr() pretty-prints its adapter cache.
    def __init__(self):
        Componentized.__init__(self)
    def __repr__(self):
        # NOTE(review): cStringIO is Python 2 only; under Python 3 this
        # would need io.StringIO. Left as-is to match the module's
        # Python 2 heritage -- confirm target interpreter before porting.
        from cStringIO import StringIO
        from pprint import pprint
        sio = StringIO()
        pprint(self._adapterCache, sio)
        return sio.getvalue()
def proxyForInterface(iface, originalAttribute='original'):
    """
    Create a class which proxies all method calls which adhere to an interface
    to another provider of that interface.
    This function is intended for creating specialized proxies. The typical way
    to use it is by subclassing the result::
        class MySpecializedProxy(proxyForInterface(IFoo)):
            def someInterfaceMethod(self, arg):
                if arg == 3:
                    return 3
                return self.original.someInterfaceMethod(arg)
    @param iface: The Interface to which the resulting object will conform, and
        which the wrapped object must provide.
    @param originalAttribute: name of the attribute used to save the original
        object in the resulting class. Default to C{original}.
    @type originalAttribute: C{str}
    @return: A class whose constructor takes the original object as its only
        argument. Constructing the class creates the proxy.
    """
    def __init__(self, original):
        setattr(self, originalAttribute, original)
    # Every name declared by the interface becomes a forwarding descriptor.
    attributes = {"__init__": __init__}
    for methodName in iface:
        attributes[methodName] = _ProxyDescriptor(methodName,
                                                  originalAttribute)
    proxyName = "(Proxy for %s)" % (reflect.qual(iface),)
    proxy = type(proxyName, (object,), attributes)
    declarations.classImplements(proxy, iface)
    return proxy
class _ProxiedClassMethod(object):
"""
A proxied class method.
@ivar methodName: the name of the method which this should invoke when
called.
@type methodName: C{str}
@ivar originalAttribute: name of the attribute of the proxy where the
original object is stored.
@type orginalAttribute: C{str}
"""
def __init__(self, methodName, originalAttribute):
self.methodName = methodName
self.originalAttribute = originalAttribute
def __call__(self, oself, *args, **kw):
"""
Invoke the specified L{methodName} method of the C{original} attribute
for proxyForInterface.
@param oself: an instance of a L{proxyForInterface} object.
@return: the result of the underlying method.
"""
original = getattr(oself, self.originalAttribute)
actualMethod = getattr(original, self.methodName)
return actualMethod(*args, **kw)
class _ProxyDescriptor(object):
"""
A descriptor which will proxy attribute access, mutation, and
deletion to the L{original} attribute of the object it is being accessed
from.
@ivar attributeName: the name of the attribute which this descriptor will
retrieve from instances' C{original} attribute.
@type attributeName: C{str}
@ivar originalAttribute: name of the attribute of the proxy where the
original object is stored.
@type orginalAttribute: C{str}
"""
def __init__(self, attributeName, originalAttribute):
self.attributeName = attributeName
self.originalAttribute = originalAttribute
def __get__(self, oself, type=None):
"""
Retrieve the C{self.attributeName} property from L{oself}.
"""
if oself is None:
return _ProxiedClassMethod(self.attributeName,
self.originalAttribute)
original = getattr(oself, self.originalAttribute)
return getattr(original, self.attributeName)
def __set__(self, oself, value):
"""
Set the C{self.attributeName} property of L{oself}.
"""
original = getattr(oself, self.originalAttribute)
setattr(original, self.attributeName, value)
def __delete__(self, oself):
"""
Delete the C{self.attributeName} property of L{oself}.
"""
original = getattr(oself, self.originalAttribute)
delattr(original, self.attributeName)
# Names exported by `from ... import *`; this is the module's public API.
__all__ = [
    # Sticking around:
    "registerAdapter", "getAdapterFactory",
    "Adapter", "Componentized", "ReprableComponentized", "getRegistry",
    "proxyForInterface",
]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExtensionsOperations:
"""ExtensionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.hdinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _enable_monitoring_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        parameters: "_models.ClusterMonitoringRequest",
        **kwargs: Any
    ) -> None:
        """Send the initial PUT of the enable-monitoring long-running operation
        and return once the service accepts it (HTTP 200/202).

        Polling to completion is handled by ``begin_enable_monitoring``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._enable_monitoring_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and run the PUT through the pipeline.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ClusterMonitoringRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})
    _enable_monitoring_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring'} # type: ignore
    async def begin_enable_monitoring(
        self,
        resource_group_name: str,
        cluster_name: str,
        parameters: "_models.ClusterMonitoringRequest",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Enables the Operations Management Suite (OMS) on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param parameters: The Operations Management Suite (OMS) workspace parameters.
        :type parameters: ~azure.mgmt.hdinsight.models.ClusterMonitoringRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The `cls` lambda makes
            # the helper return the raw PipelineResponse so the poller can read
            # the LRO headers from it.
            raw_result = await self._enable_monitoring_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not be forwarded to the
        # polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: this operation has no result body, so
            # only the optional `cls` callback is applied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }

        # final-state-via=location: the terminal state is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_enable_monitoring.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring'} # type: ignore
    async def get_monitoring_status(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> "_models.ClusterMonitoringResponse":
        """Gets the status of Operations Management Suite (OMS) on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterMonitoringResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.hdinsight.models.ClusterMonitoringResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterMonitoringResponse"]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get_monitoring_status.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ClusterMonitoringResponse', pipeline_response)

        if cls:
            # Let the caller-supplied callback transform the response.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_monitoring_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring'} # type: ignore
    async def _disable_monitoring_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the disable-monitoring long-running
        operation and return once the service accepts it (HTTP 200/202/204).

        Polling to completion is handled by ``begin_disable_monitoring``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self._disable_monitoring_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})
    _disable_monitoring_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring'} # type: ignore
    async def begin_disable_monitoring(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Disables the Operations Management Suite (OMS) on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The `cls` lambda makes
            # the helper return the raw PipelineResponse so the poller can read
            # the LRO headers from it.
            raw_result = await self._disable_monitoring_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not be forwarded to the
        # polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: this operation has no result body, so
            # only the optional `cls` callback is applied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }

        # final-state-via=location: the terminal state is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_disable_monitoring.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring'} # type: ignore
    async def _enable_azure_monitor_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        parameters: "_models.AzureMonitorRequest",
        **kwargs: Any
    ) -> None:
        """Send the initial PUT of the enable-Azure-Monitor long-running
        operation and return once the service accepts it (HTTP 200/202).

        Polling to completion is handled by ``begin_enable_azure_monitor``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._enable_azure_monitor_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and run the PUT through the pipeline.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'AzureMonitorRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})
    _enable_azure_monitor_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor'} # type: ignore
    async def begin_enable_azure_monitor(
        self,
        resource_group_name: str,
        cluster_name: str,
        parameters: "_models.AzureMonitorRequest",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Enables the Azure Monitor on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param parameters: The Log Analytics workspace parameters.
        :type parameters: ~azure.mgmt.hdinsight.models.AzureMonitorRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The `cls` lambda makes
            # the helper return the raw PipelineResponse so the poller can read
            # the LRO headers from it.
            raw_result = await self._enable_azure_monitor_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not be forwarded to the
        # polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: this operation has no result body, so
            # only the optional `cls` callback is applied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }

        # final-state-via=location: the terminal state is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_enable_azure_monitor.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor'} # type: ignore
    async def get_azure_monitor_status(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> "_models.AzureMonitorResponse":
        """Gets the status of Azure Monitor on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AzureMonitorResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.hdinsight.models.AzureMonitorResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureMonitorResponse"]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get_azure_monitor_status.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AzureMonitorResponse', pipeline_response)

        if cls:
            # Let the caller-supplied callback transform the response.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_azure_monitor_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor'} # type: ignore
    async def _disable_azure_monitor_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the disable-Azure-Monitor long-running
        operation and return once the service accepts it (HTTP 200/202/204).

        Polling to completion is handled by ``begin_disable_azure_monitor``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self._disable_azure_monitor_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})
    _disable_azure_monitor_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor'} # type: ignore
    async def begin_disable_azure_monitor(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Disables the Azure Monitor on the HDInsight cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The `cls` lambda makes
            # the helper return the raw PipelineResponse so the poller can read
            # the LRO headers from it.
            raw_result = await self._disable_azure_monitor_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not be forwarded to the
        # polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: this operation has no result body, so
            # only the optional `cls` callback is applied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }

        # final-state-via=location: the terminal state is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_disable_azure_monitor.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor'} # type: ignore
    async def _create_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        extension_name: str,
        parameters: "_models.Extension",
        **kwargs: Any
    ) -> None:
        """Send the initial PUT of the create-extension long-running operation
        and return once the service accepts it (HTTP 200/202).

        Polling to completion is handled by ``begin_create``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known failure statuses to typed exceptions; callers may
        # override/extend this via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and run the PUT through the pipeline.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'Extension')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}'} # type: ignore
    async def begin_create(
        self,
        resource_group_name: str,
        cluster_name: str,
        extension_name: str,
        parameters: "_models.Extension",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Creates an HDInsight cluster extension.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param extension_name: The name of the cluster extension.
        :type extension_name: str
        :param parameters: The cluster extensions create request.
        :type parameters: ~azure.mgmt.hdinsight.models.Extension
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The `cls` lambda makes
            # the helper return the raw PipelineResponse so the poller can read
            # the LRO headers from it.
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                extension_name=extension_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not be forwarded to the
        # polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: this operation has no result body, so
            # only the optional `cls` callback is applied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
        }

        # final-state-via=location: the terminal state is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
**kwargs: Any
) -> "_models.ClusterMonitoringResponse":
"""Gets the extension properties for the specified HDInsight cluster extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param extension_name: The name of the cluster extension.
:type extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClusterMonitoringResponse, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.ClusterMonitoringResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterMonitoringResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ClusterMonitoringResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        cluster_name: str,
        extension_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified extension for HDInsight cluster.

        Long-running operation: sends the initial delete request via
        ``_delete_initial`` and returns an async poller that tracks the
        operation (final state resolved via the ``location`` header).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param extension_name: The name of the cluster extension.
        :type extension_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object
         for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Polling strategy: True -> default AsyncARMPolling, False -> no polling,
        # anything else -> caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token;
        # the cls override returns the raw pipeline response the poller needs.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                extension_name=extension_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded again to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Final deserialization callback; returns None unless the caller passed
        # a custom cls (closure over the popped value above).
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a saved poller if a continuation token was provided; otherwise
        # start a fresh poller from the initial response.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}'} # type: ignore
async def get_azure_async_operation_status(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.AsyncOperationResult":
"""Gets the async operation status.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param extension_name: The name of the cluster extension.
:type extension_name: str
:param operation_id: The long running operation id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AsyncOperationResult, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.AsyncOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AsyncOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get_azure_async_operation_status.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'extensionName': self._serialize.url("extension_name", extension_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AsyncOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_azure_async_operation_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}/azureAsyncOperations/{operationId}'} # type: ignore
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.