gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import copy
import operator
from functools import wraps, update_wrapper
import sys
from django.utils import six
from django.utils.six.moves import copyreg
# You can't trivially replace this `functools.partial` because this binds to
# classes and returns bound instances, whereas functools.partial (on CPython)
# is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
    # You can't trivially replace this `functools.partial` because this binds
    # to classes and returns bound instances, whereas functools.partial (on
    # CPython) is a type and its instances don't bind.
    def _curried(*moreargs, **morekwargs):
        # Later keyword arguments override the pre-bound ones.
        merged_kwargs = dict(kwargs, **morekwargs)
        return _curried_func(*(args + moreargs), **merged_kwargs)
    return _curried
def memoize(func, cache, num_args):
    """
    Wrap a function so that results for any argument tuple are stored in
    'cache'. Note that the args to the function must be usable as dictionary
    keys.

    Only the first num_args are considered when creating the key.
    """
    @wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        try:
            return cache[key]
        except KeyError:
            cache[key] = value = func(*args)
            return value
    return wrapper
class cached_property(object):
    """
    Decorator that converts a method with a single self argument into a
    property cached on the instance.

    The computed value is stored in the instance's ``__dict__`` under the
    method's name, so after the first access the instance attribute shadows
    this non-data descriptor and ``__get__`` is never called again.
    """
    def __init__(self, func):
        # The wrapped single-argument method.
        self.func = func

    def __get__(self, instance, type=None):
        if instance is None:
            # Accessed on the class itself (e.g. during introspection):
            # return the descriptor instead of crashing on None.__dict__.
            return self
        # Compute once and cache on the instance in a single statement.
        res = instance.__dict__[self.func.__name__] = self.func(instance)
        return res
class Promise(object):
    """
    This is just a base class for the proxy class created in
    the closure of the lazy function. It can be used to recognize
    promises in code, e.g. ``isinstance(obj, Promise)``.
    """
    # Intentionally empty: only used as a marker base class.
    pass
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Class-level table mapping each result class to a dict of
        # {method name: unbound method}; built lazily by __prepare_class__.
        __dispatch = None

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            # First instantiation builds the dispatch table and patches
            # wrapper methods onto this class.
            if self.__dispatch is None:
                self.__prepare_class__()

        def __reduce__(self):
            # Pickle the ingredients (func, call args, result classes) and
            # rebuild the proxy via _lazy_proxy_unpickle, rather than
            # pickling the evaluated result.
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        def __prepare_class__(cls):
            cls.__dispatch = {}
            for resultclass in resultclasses:
                cls.__dispatch[resultclass] = {}
                # Walk the MRO base-first so subclasses' methods win the
                # dispatch-dict registration done inside __promise__.
                for type_ in reversed(resultclass.mro()):
                    for (k, v) in type_.__dict__.items():
                        # All __promise__ return the same wrapper method, but they
                        # also do setup, inserting the method into the dispatch
                        # dict.
                        meth = cls.__promise__(resultclass, k, v)
                        # Don't shadow attributes the proxy class already has.
                        if hasattr(cls, k):
                            continue
                        setattr(cls, k, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = six.text_type in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), "Cannot call lazy() with both bytes and text return types."
            # Wire up the appropriate string-conversion dunder for the
            # running Python version.
            if cls._delegate_text:
                if six.PY3:
                    cls.__str__ = cls.__text_cast
                else:
                    cls.__unicode__ = cls.__text_cast
            elif cls._delegate_bytes:
                if six.PY3:
                    cls.__bytes__ = cls.__bytes_cast
                else:
                    cls.__str__ = cls.__bytes_cast
        # classmethod() applied explicitly (pre-@decorator idiom).
        __prepare_class__ = classmethod(__prepare_class__)

        def __promise__(cls, klass, funcname, method):
            # Builds a wrapper around some magic method and registers that magic
            # method for the given type and method name.
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                for t in type(res).mro():
                    if t in self.__dispatch:
                        return self.__dispatch[t][funcname](res, *args, **kw)
                raise TypeError("Lazy object returned unexpected type.")
            if klass not in cls.__dispatch:
                cls.__dispatch[klass] = {}
            cls.__dispatch[klass][funcname] = method
            return __wrapper__
        __promise__ = classmethod(__promise__)

        def __text_cast(self):
            # Force evaluation; used as __str__/__unicode__ for text proxies.
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            # Force evaluation and coerce to bytes; used for bytes proxies.
            return bytes(func(*self.__args, **self.__kw))

        def __cast(self):
            # Force evaluation, coercing to the declared result type when the
            # proxy was built for bytes or text.
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __eq__(self, other):
            # Compare forced values; force the other side too if it is
            # itself a promise.
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            # total_ordering (applied above) derives the remaining
            # comparison methods from __lt__ and __eq__.
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            # Support "%"-style interpolation on string-returning proxies.
            if self._delegate_bytes and not six.PY3:
                return bytes(self) % rhs
            elif self._delegate_text:
                return six.text_type(self) % rhs
            else:
                raise AssertionError('__mod__ not supported for non-string types')

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)
    return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Recreate a lazy proxy from its pickled parts (see __proxy__.__reduce__)."""
    lazy_func = lazy(func, *resultclasses)
    return lazy_func(*args, **kwargs)
def allow_lazy(func, *resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Evaluate eagerly unless at least one argument is still a Promise.
        has_lazy_arg = any(
            isinstance(arg, Promise)
            for arg in list(args) + list(six.itervalues(kwargs))
        )
        if not has_lazy_arg:
            return func(*args, **kwargs)
        return lazy(func, *resultclasses)(*args, **kwargs)
    return wrapper
# Sentinel marking "wrapped object not yet created".
empty = object()


def new_method_proxy(func):
    """Return a method that applies ``func`` to the wrapped object,
    materializing it first when it has not been set up yet."""
    def _proxied(self, *args):
        wrapped = self._wrapped
        if wrapped is empty:
            self._setup()
            wrapped = self._wrapped
        return func(wrapped, *args)
    return _proxied
class LazyObject(object):
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """
    def __init__(self):
        self._wrapped = empty

    __getattr__ = new_method_proxy(getattr)

    def __setattr__(self, name, value):
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
            return
        # Any other attribute goes onto the wrapped object, which must
        # therefore exist first.
        if self._wrapped is empty:
            self._setup()
        setattr(self._wrapped, name, value)

    def __delattr__(self, name):
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is empty:
            self._setup()
        delattr(self._wrapped, name)

    def _setup(self):
        """Must be implemented by subclasses to initialise the wrapped object."""
        raise NotImplementedError

    # Introspection support:
    __dir__ = new_method_proxy(dir)
# Workaround for http://bugs.python.org/issue12370: bind the builtin super
# under a different name so it can still be called inside a class (like
# SimpleLazyObject below) that redefines __class__ — presumably the issue
# makes the plain name unreliable there; confirm against the bug report.
_super = super
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialised from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """
    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Assign via __dict__ because LazyObject.__setattr__ would forward
        # the assignment to the (not yet created) wrapped object.
        self.__dict__['_setupfunc'] = func
        _super(SimpleLazyObject, self).__init__()

    def _setup(self):
        # Create the wrapped object on first use.
        self._wrapped = self._setupfunc()

    if six.PY3:
        __bytes__ = new_method_proxy(bytes)
        __str__ = new_method_proxy(str)
    else:
        __str__ = new_method_proxy(str)
        __unicode__ = new_method_proxy(unicode)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        else:
            # Already materialized: deep-copy the wrapped object itself.
            return copy.deepcopy(self._wrapped, memo)

    # Because we have messed with __class__ below, we confuse pickle as to what
    # class we are pickling. It also appears to stop __reduce__ from being
    # called. So, we define __getstate__ in a way that cooperates with the way
    # that pickle interprets this class. This fails when the wrapped class is a
    # builtin, but it is better than nothing.
    def __getstate__(self):
        if self._wrapped is empty:
            self._setup()
        return self._wrapped.__dict__

    # Python 3.3 will call __reduce__ when pickling; this method is needed
    # to serialize and deserialize correctly.
    @classmethod
    def __newobj__(cls, *args):
        # Bypass __init__ (which requires a setup callable) when unpickling.
        return cls.__new__(cls, *args)

    def __reduce_ex__(self, proto):
        if proto >= 2:
            # On Py3, since the default protocol is 3, pickle uses the
            # ``__newobj__`` method (& more efficient opcodes) for writing.
            return (self.__newobj__, (self.__class__,), self.__getstate__())
        else:
            # On Py2, the default protocol is 0 (for back-compat) & the above
            # code fails miserably (see regression test). Instead, we return
            # exactly what's returned if there's no ``__reduce__`` method at
            # all.
            return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())

    # Need to pretend to be the wrapped class, for the sake of objects that care
    # about this (especially in equality tests)
    __class__ = property(new_method_proxy(operator.attrgetter("__class__")))
    __eq__ = new_method_proxy(operator.eq)
    __hash__ = new_method_proxy(hash)
    __bool__ = new_method_proxy(bool)  # Python 3
    __nonzero__ = __bool__  # Python 2
class lazy_property(property):
    """
    A property that works with subclasses by wrapping the decorated
    functions of the base class.

    Each accessor is replaced by a thin wrapper that looks the method up
    on the instance by name at call time, so subclass overrides are honored.
    """
    def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
        if fget is not None:
            getter_name = fget.__name__

            @wraps(fget)
            def fget(instance, instance_type=None, name=getter_name):
                return getattr(instance, name)()
        if fset is not None:
            setter_name = fset.__name__

            @wraps(fset)
            def fset(instance, value, name=setter_name):
                return getattr(instance, name)(value)
        if fdel is not None:
            deleter_name = fdel.__name__

            @wraps(fdel)
            def fdel(instance, name=deleter_name):
                return getattr(instance, name)()
        # Note: returns a plain property, not a lazy_property instance.
        return property(fget, fset, fdel, doc)
def partition(predicate, values):
    """
    Splits the values into two sets, based on the return value of the function
    (True/False). e.g.:

        >>> partition(lambda x: x > 3, range(5))
        ([0, 1, 2, 3], [4])

    Returns a pair of lists ``(falsy, truthy)``, preserving input order.
    """
    results = ([], [])
    for item in values:
        # bool() guards against predicates that return a truthy non-bool
        # (e.g. 2), which would otherwise raise IndexError when indexing
        # the two-element tuple.
        results[bool(predicate(item))].append(item)
    return results
if sys.version_info >= (2,7,2):
    from functools import total_ordering
else:
    # For Python < 2.7.2. Python 2.6 does not have total_ordering, and
    # total_ordering in 2.7 versions prior to 2.7.2 is buggy. See
    # http://bugs.python.org/issue10042 for details. For these versions use
    # code borrowed from Python 2.7.3.
    def total_ordering(cls):
        """Class decorator that fills in missing ordering methods"""
        # For each comparison "root" the class may define, list the other
        # methods and how to derive each from that root plus __eq__.
        convert = {
            '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
                       ('__le__', lambda self, other: self < other or self == other),
                       ('__ge__', lambda self, other: not self < other)],
            '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
                       ('__lt__', lambda self, other: self <= other and not self == other),
                       ('__gt__', lambda self, other: not self <= other)],
            '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
                       ('__ge__', lambda self, other: self > other or self == other),
                       ('__le__', lambda self, other: not self > other)],
            '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
                       ('__gt__', lambda self, other: self >= other and not self == other),
                       ('__lt__', lambda self, other: not self >= other)]
        }
        # Which of the four roots does the class actually define?
        roots = set(dir(cls)) & set(convert)
        if not roots:
            raise ValueError('must define at least one ordering operation: < > <= >=')
        root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
        for opname, opfunc in convert[root]:
            # Only fill in methods the class does not already define.
            if opname not in roots:
                opfunc.__name__ = opname
                opfunc.__doc__ = getattr(int, opname).__doc__
                setattr(cls, opname, opfunc)
        return cls
| |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Selenium keywords for Wait Actions """
from Framework.ClassUtils.WSelenium.element_locator import ElementLocator
from Framework.ClassUtils.WSelenium.wait_operations import WaitOperations
from Framework.ClassUtils.json_utils_class import JsonUtils
try:
import json
import os
import sys
import re
import getopt
import datetime
import Framework.Utils as Utils
except ImportWarning:
raise ImportError
from Framework.Utils import data_Utils
from Framework.Utils.testcase_Utils import pNote, pSubStep
from Framework.Utils import xml_Utils
from Framework.Utils import selenium_Utils
class wait_actions(object):
"""This class has the functionality to wait till an event has happened on
the webpage - such as the browser will wait till an element is clickable,
visible, or present on a webpage. Implicit wait can also be set for a
webpage with this class """
def __init__(self):
    """Constructor for the wait_actions class: caches framework-wide
    configuration handles and creates the Selenium helper objects."""
    config = Utils.config_Utils
    self.resultfile = config.resultfile
    self.datafile = config.datafile
    self.logsdir = config.logsdir
    self.filename = config.filename
    self.logfile = config.logfile
    self.jsonobj = JsonUtils()
    self.wait_oper_object = WaitOperations()
    self.element_locator_obj = ElementLocator()
def set_implicit_wait(self, system_name, timeout, browser_name="all",
                      element_config_file=None, element_tag=None):
    """
    This keyword would permanently set the implicit wait time for given
    browser instance(s)

    :Datafile Usage:
        Tags or attributes to be used in input datafile for the system or
        subsystem. If both tag and attribute is provided the attribute will
        be used.

        1. system_name = This attribute can be specified in the datafile as
                         a <system> tag directly under the <credentials>
                         tag. An attribute "name" has to be added to this
                         tag and the value of that attribute would be taken
                         in as value to this keyword attribute.
                         <system name="name_of_thy_system"/>
        2. browser_name = This <browser_name> tag is a child of the
                          <browser> tag in the data file. Each browser
                          instance should have a unique name. This name can
                          be added here
                          Eg: <browser_name>Unique_name_1</browser_name>
        3. timeout = This contains the information of how much time the
                     browser needs to wait for any action to be performed
                     on it
                     Eg: <timeout>15</timeout>
        4. element_config_file = This <element_config_file> tag is a child
                                 of the <browser> tag in the data file. This
                                 stores the location of the element
                                 configuration file that contains all
                                 element locators.
                                 Eg: <element_config_file>
                                     ../Config_files/selenium_config.json
                                     </element_config_file>
        5. element_tag = This element_tag refers to a particular element in
                         the json fie which contains relevant information to
                         that element. If you want to use this one element
                         through out the testcase for a particular browser,
                         you can include it in the data file. If this not
                         the case, then you should create an argument tag
                         in the relevant testcase step and add the value
                         directly in the testcase step.
                         FOR DATA FILE
                         Eg: <element_tag>json_name_1</element_tag>
                         FOR TEST CASE
                         Eg: <argument name="element_tag" value="json_name_1">

    :Arguments:
        1. system_name(str) = the system name.
        2. browser_name(str) = Unique name for this particular browser
        3. timeout(str) = amount of time the browser should wait
        4. element_config_file (str) = location of the element configuration
                                       file that contains all element
                                       locators
        5. element_tag (str) = particular element in the json fie which
                               contains relevant information to that element

    :Returns:
        1. status(bool)= True / False.
    """
    arguments = locals()
    arguments.pop('self')
    status = True
    wdesc = "This would permanently set the implicit wait time for " \
            "given browser instance(s)"
    pNote(wdesc)
    pSubStep(wdesc)
    browser_details = {}
    # Fix: initialize so the screenshot check after the loop cannot raise
    # UnboundLocalError when no browser could be resolved for any entry.
    current_browser = None
    system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
                                                         "system",
                                                         "name",
                                                         system_name)
    browser_list = system.findall("browser")
    try:
        browser_list.extend(system.find("browsers").findall("browser"))
    except AttributeError:
        pass
    if not browser_list:
        # No <browser> tags in the datafile: run once using only the
        # keyword arguments.
        browser_list.append(1)
        browser_details = arguments
    for browser in browser_list:
        arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
                                                            self.datafile,
                                                            browser)
        if browser_details == {}:
            browser_details = selenium_Utils. \
                get_browser_details(browser, datafile=self.datafile, **arguments)
        if browser_details is not None:
            current_browser = Utils.data_Utils.get_object_from_datarepository(
                system_name + "_" + browser_details["browser_name"])
            if not current_browser:
                pNote("Browser of system {0} and name {1} not found in the "
                      "datarepository"
                      .format(system_name, browser_details["browser_name"]),
                      "Exception")
                status = False
            else:
                self.wait_oper_object.\
                    implicit_wait(current_browser,
                                  browser_details["timeout"])
        browser_details = {}
    Utils.testcase_Utils.report_substep_status(status)
    if current_browser:
        selenium_Utils.save_screenshot_onerror(status, current_browser)
    return status
def wait_until_element_is_clickable(self, system_name, timeout=5,
                                    locator=None, locator_type=None,
                                    browser_name="all", element_tag=None,
                                    element_config_file=None):
    """
    This keyword would check whether an element is visible and
    enabled such that we can click on the element

    :Datafile Usage:
        Tags or attributes to be used in input datafile for the system or
        subsystem. If both tag and attribute is provided the attribute will
        be used.

        1. system_name = This attribute can be specified in the datafile as
                         a <system> tag directly under the <credentials>
                         tag. An attribute "name" has to be added to this
                         tag and the value of that attribute would be taken
                         in as value to this keyword attribute.
                         <system name="name_of_thy_system"/>
        2. browser_name = This <browser_name> tag is a child of the
                          <browser> tag in the data file. Each browser
                          instance should have a unique name. This name can
                          be added here
                          Eg: <browser_name>Unique_name_1</browser_name>
        3. timeout = This contains the information of how much time the
                     browser needs to wait for the element to become
                     clickable
                     Eg: <timeout>15</timeout>
        4. locator_type = This contains information about the type of
                          locator that you want to use. Can be 'xpath',
                          'id', 'css', 'link', 'tag', 'class', 'name'
        5. locator = This contains the value of the locator. Something like
                     "form", "nav-tags", "//[dh./dhh[yby]"
        6. element_config_file = This contains the location of the json
                                 file that contains information about all
                                 the elements that you require for the
                                 testcase execution
        7. element_tag = This contains the name of the element in that
                         element_config_file which you want to use

        USING LOCATOR_TYPE, LOCATOR, ELEMENT_CONFIG_FILE, AND ELEMENT_TAG
        =================================================================
        None of these arguments are mandatory BUT to search an element,
        you need to provide Warrior with some way to do it.

        a. You can either directly give values for the locator_type and
        locator. So if locator_type = name and locator = navigation-bar,
        then Warrior can search for an element with name "navigation-bar"

        b. You can give location of the element_config_file and a tag inside
        it so that Warrior can search for that tag and get the required
        information from there.
        - Now, if the locator type is given, Warrior will search for that
          locator_type in the children of that element in the
          element_config_file
        - You can also set defaults in the element_config_file, and now,
          even if the locator_type is not given, Warrior will know which
          element to find. If locator_type is given, the default will be
          overridden
        - If locator_type is not given, and the defaults are not
          specified, then the first element in the child list of the
          element tag would be picked.

        NOTES:
        For these four arguments to be given correctly, ONE of the
        following conditions must be satisfied.
        1. locator_type and locator must be given
        2. locator_type, element_config_file, and element_tag must be given
        3. element_config_file, and element_tag must be given

        The datafile has the first priority, then the json file, and
        then finally the testcase.

    :Arguments:
        1. system_name(str) = the system name.
        2. browser_name(str) = Unique name for this particular browser
        3. timeout(str) = amount of time the browser should wait
        4. locator_type(str) = type of the locator - xpath, id, etc.
        5. locator(str) = locator by which the element should be located.
        6. element_config_file(str) = location of the element config file
        7. element_tag(str) = json id of the locator that you want to use
                              from the element config file

    :Returns:
        1. status(bool)= True / False.
    """
    arguments = locals()
    arguments.pop('self')
    status = True
    wdesc = "Browser would wait until element is clickable"
    pNote(wdesc)
    pSubStep(wdesc)
    browser_details = {}
    # Fix: initialize so the screenshot check after the loop cannot raise
    # UnboundLocalError when no browser could be resolved for any entry.
    current_browser = None
    system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
                                                         "system",
                                                         "name",
                                                         system_name)
    browser_list = system.findall("browser")
    try:
        browser_list.extend(system.find("browsers").findall("browser"))
    except AttributeError:
        pass
    if not browser_list:
        # No <browser> tags in the datafile: run once using only the
        # keyword arguments.
        browser_list.append(1)
        browser_details = arguments
    for browser in browser_list:
        arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
                                                            self.datafile,
                                                            browser)
        if browser_details == {}:
            browser_details = selenium_Utils. \
                get_browser_details(browser, datafile=self.datafile, **arguments)
        if browser_details is not None:
            current_browser = Utils.data_Utils.get_object_from_datarepository(
                system_name + "_" + browser_details["browser_name"])
            if not current_browser:
                pNote("Browser of system {0} and name {1} not found in the "
                      "datarepository"
                      .format(system_name, browser_details["browser_name"]),
                      "Exception")
                status = False
            else:
                status = self.wait_oper_object.\
                    wait_until_element_is_clickable(current_browser,
                                                    browser_details["locator_type"],
                                                    browser_details["locator"],
                                                    browser_details["timeout"])
        browser_details = {}
    Utils.testcase_Utils.report_substep_status(status)
    if current_browser:
        selenium_Utils.save_screenshot_onerror(status, current_browser)
    return status
def wait_until_presence_of_element_located(self, system_name, timeout=5,
                                           locator=None, locator_type=None,
                                           browser_name="all",
                                           element_tag=None,
                                           element_config_file=None):
    """
    This keyword would check whether an element is present on the DOM
    of a page

    :Datafile Usage:
        Tags or attributes to be used in input datafile for the system or
        subsystem. If both tag and attribute is provided the attribute will
        be used.

        1. system_name = This attribute can be specified in the datafile as
                         a <system> tag directly under the <credentials>
                         tag. An attribute "name" has to be added to this
                         tag and the value of that attribute would be taken
                         in as value to this keyword attribute.
                         <system name="name_of_thy_system"/>
        2. browser_name = This <browser_name> tag is a child of the
                          <browser> tag in the data file. Each browser
                          instance should have a unique name. This name can
                          be added here
                          Eg: <browser_name>Unique_name_1</browser_name>
        3. timeout = This contains the information of how much time the
                     browser needs to wait for the element to be located
                     Eg: <timeout>15</timeout>
        4. locator_type = This contains information about the type of
                          locator that you want to use. Can be 'xpath',
                          'id', 'css', 'link', 'tag', 'class', 'name'
        5. locator = This contains the value of the locator. Something like
                     "form", "nav-tags", "//[dh./dhh[yby]"
        6. element_config_file = This contains the location of the json
                                 file that contains information about all
                                 the elements that you require for the
                                 testcase execution
        7. element_tag = This contains the name of the element in that
                         element_config_file which you want to use

        USING LOCATOR_TYPE, LOCATOR, ELEMENT_CONFIG_FILE, AND ELEMENT_TAG
        =================================================================
        None of these arguments are mandatory BUT to search an element,
        you need to provide Warrior with some way to do it.

        a. You can either directly give values for the locator_type and
        locator. So if locator_type = name and locator = navigation-bar,
        then Warrior can search for an element with name "navigation-bar"

        b. You can give location of the element_config_file and a tag inside
        it so that Warrior can search for that tag and get the required
        information from there.
        - Now, if the locator type is given, Warrior will search for that
          locator_type in the children of that element in the
          element_config_file
        - You can also set defaults in the element_config_file, and now,
          even if the locator_type is not given, Warrior will know which
          element to find. If locator_type is given, the default will be
          overridden
        - If locator_type is not given, and the defaults are not
          specified, then the first element in the child list of the
          element tag would be picked.

        NOTES:
        For these four arguments to be given correctly, ONE of the
        following conditions must be satisfied.
        1. locator_type and locator must be given
        2. locator_type, element_config_file, and element_tag must be given
        3. element_config_file, and element_tag must be given

        The datafile has the first priority, then the json file, and
        then finally the testcase.

    :Arguments:
        1. system_name(str) = the system name.
        2. browser_name(str) = Unique name for this particular browser
        3. timeout(str) = amount of time the browser should wait
        4. locator_type(str) = type of the locator - xpath, id, etc.
        5. locator(str) = locator by which the element should be located.
        6. element_config_file(str) = location of the element config file
        7. element_tag(str) = json id of the locator that you want to use
                              from the element config file

    :Returns:
        1. status(bool)= True / False.
    """
    arguments = locals()
    arguments.pop('self')
    status = True
    wdesc = "Browser would wait until presence of element is detected"
    pNote(wdesc)
    pSubStep(wdesc)
    browser_details = {}
    # Fix: initialize so the screenshot check after the loop cannot raise
    # UnboundLocalError when no browser could be resolved for any entry.
    current_browser = None
    system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
                                                         "system",
                                                         "name",
                                                         system_name)
    browser_list = system.findall("browser")
    try:
        browser_list.extend(system.find("browsers").findall("browser"))
    except AttributeError:
        pass
    if not browser_list:
        # No <browser> tags in the datafile: run once using only the
        # keyword arguments.
        browser_list.append(1)
        browser_details = arguments
    for browser in browser_list:
        arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
                                                            self.datafile,
                                                            browser)
        if browser_details == {}:
            browser_details = selenium_Utils. \
                get_browser_details(browser, datafile=self.datafile, **arguments)
        if browser_details is not None:
            current_browser = Utils.data_Utils.get_object_from_datarepository(
                system_name + "_" + browser_details["browser_name"])
            if not current_browser:
                pNote("Browser of system {0} and name {1} not found in the "
                      "datarepository"
                      .format(system_name, browser_details["browser_name"]),
                      "Exception")
                status = False
            else:
                status = self.wait_oper_object.\
                    wait_until_presence_of_element_located(current_browser,
                                                           browser_details["locator_type"],
                                                           browser_details["locator"],
                                                           browser_details["timeout"])
        browser_details = {}
    Utils.testcase_Utils.report_substep_status(status)
    if current_browser:
        selenium_Utils.save_screenshot_onerror(status, current_browser)
    return status
def wait_until_presence_of_all_elements_located(self, system_name,
                                                timeout=5, locator=None,
                                                locator_type=None,
                                                browser_name="all",
                                                element_tag=None,
                                                element_config_file=None):
    """
    This keyword would check whether all the elements is present on
    the DOM of a page

    :Datafile Usage:
        Tags or attributes to be used in input datafile for the system or
        subsystem. If both tag and attribute is provided the attribute will
        be used.

        1. system_name = This attribute can be specified in the datafile as
                         a <system> tag directly under the <credentials>
                         tag. An attribute "name" has to be added to this
                         tag and the value of that attribute would be taken
                         in as value to this keyword attribute.
                         <system name="name_of_thy_system"/>
        2. browser_name = This <browser_name> tag is a child of the
                          <browser> tag in the data file. Each browser
                          instance should have a unique name. This name can
                          be added here
                          Eg: <browser_name>Unique_name_1</browser_name>
        3. timeout = This contains the information of how much time the
                     browser needs to wait for all the elements to be
                     located
                     Eg: <timeout>15</timeout>
        4. locator_type = This contains information about the type of
                          locator that you want to use. Can be 'xpath',
                          'id', 'css', 'link', 'tag', 'class', 'name'
        5. locator = This contains the value of the locator. Something like
                     "form", "nav-tags", "//[dh./dhh[yby]"
        6. element_config_file = This contains the location of the json
                                 file that contains information about all
                                 the elements that you require for the
                                 testcase execution
        7. element_tag = This contains the name of the element in that
                         element_config_file which you want to use

        USING LOCATOR_TYPE, LOCATOR, ELEMENT_CONFIG_FILE, AND ELEMENT_TAG
        =================================================================
        None of these arguments are mandatory BUT to search an element,
        you need to provide Warrior with some way to do it.

        a. You can either directly give values for the locator_type and
        locator. So if locator_type = name and locator = navigation-bar,
        then Warrior can search for an element with name "navigation-bar"

        b. You can give location of the element_config_file and a tag inside
        it so that Warrior can search for that tag and get the required
        information from there.
        - Now, if the locator type is given, Warrior will search for that
          locator_type in the children of that element in the
          element_config_file
        - You can also set defaults in the element_config_file, and now,
          even if the locator_type is not given, Warrior will know which
          element to find. If locator_type is given, the default will be
          overridden
        - If locator_type is not given, and the defaults are not
          specified, then the first element in the child list of the
          element tag would be picked.

        NOTES:
        For these four arguments to be given correctly, ONE of the
        following conditions must be satisfied.
        1. locator_type and locator must be given
        2. locator_type, element_config_file, and element_tag must be given
        3. element_config_file, and element_tag must be given

        The datafile has the first priority, then the json file, and
        then finally the testcase.

    :Arguments:
        1. system_name(str) = the system name.
        2. browser_name(str) = Unique name for this particular browser
        3. timeout(str) = amount of time the browser should wait
        4. locator_type(str) = type of the locator - xpath, id, etc.
        5. locator(str) = locator by which the element should be located.
        6. element_config_file(str) = location of the element config file
        7. element_tag(str) = json id of the locator that you want to use
                              from the element config file

    :Returns:
        1. status(bool)= True / False.
    """
    arguments = locals()
    arguments.pop('self')
    status = True
    wdesc = "Browser would wait until presence of elements is detected"
    pNote(wdesc)
    pSubStep(wdesc)
    browser_details = {}
    # Fix: initialize so the screenshot check after the loop cannot raise
    # UnboundLocalError when no browser could be resolved for any entry.
    current_browser = None
    system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
                                                         "system",
                                                         "name",
                                                         system_name)
    browser_list = system.findall("browser")
    try:
        browser_list.extend(system.find("browsers").findall("browser"))
    except AttributeError:
        pass
    if not browser_list:
        # No <browser> tags in the datafile: run once using only the
        # keyword arguments.
        browser_list.append(1)
        browser_details = arguments
    for browser in browser_list:
        arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
                                                            self.datafile,
                                                            browser)
        if browser_details == {}:
            browser_details = selenium_Utils. \
                get_browser_details(browser, datafile=self.datafile, **arguments)
        if browser_details is not None:
            current_browser = Utils.data_Utils.get_object_from_datarepository(
                system_name + "_" + browser_details["browser_name"])
            if not current_browser:
                pNote("Browser of system {0} and name {1} not found in the "
                      "datarepository"
                      .format(system_name, browser_details["browser_name"]),
                      "Exception")
                status = False
            else:
                status = self.wait_oper_object.\
                    wait_until_presence_of_all_elements_located(current_browser,
                                                                browser_details["locator_type"],
                                                                browser_details["locator"],
                                                                browser_details["timeout"])
        browser_details = {}
    Utils.testcase_Utils.report_substep_status(status)
    if current_browser:
        selenium_Utils.save_screenshot_onerror(status, current_browser)
    return status
def wait_until_visibility_is_determined(self, system_name, timeout="5",
locator=None, locator_type=None,
browser_name="all",
element_tag=None,
element_config_file=None):
"""
This keyword would check whether an element, known to be present on
the DOM of a page, is visible
:Datafile Usage:
Tags or attributes to be used in input datafile for the system or
subsystem. If both tag and attribute is provided the attribute will
be used.
1. system_name = This attribute can be specified in the datafile as
a <system> tag directly under the <credentials>
tag. An attribute "name" has to be added to this
tag and the value of that attribute would be taken
in as value to this keyword attribute.
<system name="name_of_thy_system"/>
2. browser_name = This <browser_name> tag is a child of the
<browser> tag in the data file. Each browser
instance should have a unique name. This name can
be added here
Eg: <browser_name>Unique_name_1</browser_name>
3. timeout = This contains the information of how much time the
browser needs to wait for an element known to exist in
the DOM to become visible
Eg: <timeout>15</timeout>
4. locator_type = This contains information about the type of
locator that you want to use. Can be 'xpath',
'id', 'css', 'link', 'tag','class', 'name'
5. locator = This contains the value of the locator. Something like
"form", "nav-tags", "//[dh./dhh[yby]"
6. element_config_file = This contains the location of the json
file that contains information about all
the elements that you require for the
testcase execution
7. element_tag = This contains the name of the element in that
element_config_file which you want to use
USING LOCATOR_TYPE, LOCATOR, ELEMENT_CONFIG_FILE, AND ELEMENT_TAG
=================================================================
None of these arguments are mandatory BUT to search an element,
you need to provide Warrior with some way to do it.
a. You can either directly give values for the locator_type and
locator. So if locator_type = name and locator = navigation-bar,
then Warrior can search for an element with name "navigation-bar"
b. You can give location of the element_config_file and a tag inside
it so that Warrior can search for that tag and get the required
information from there.
- Now, if the locator type is given, Warrior
will search for that locator_type in the children of that element in
the element_config_file
- You can also set defaults in the element_config_file, and now,
even if the locator_type is not given, Warrior will know which
element to find. If locator_type is given, the default will be
overridden
- If locator_type is not f=given, and the defaults are not
specified, then the first element in the child list of the element
tag would be picked.
NOTES:
For these four arguments to be given correctly, ONE of the
following conditions must be satisfied.
1. locator_type and locator must be given
2. locator_type, element_config_file, and element_tag must be given
3. element_config_file, and element_tag must be given
The datafile has the first priority, then the json file, and
then finally the testcase.
If all arguments are passed from the same place, then, if
locator and locator_type are given, then they would have
priority. Otherwise, the element_config_file would be searched
The locator_type locator, element_tag can be given the datafile
as children of the <browser> tag, but these values would remain
constant for that browser. It is recommended that these values
be passed from the testcase step.
The element_config_file typically would not change from step to
step, so it can be passed from the data file
:Arguments:
1. system_name(str) = the system name.
2. browser_name(str) = Unique name for this particular browser
3. timeout(str) = amount of time the browser should wait
4. locator_type(str) = type of the locator - xpath, id, etc.
5. locator(str) = locator by which the element should be located.
6. element_config_file(str) = location of the element config file
7. element_tag(str) = json id of the locator that you want to use
from the element config file
:Returns:
1. status(bool)= True / False.
"""
arguments = locals()
arguments.pop('self')
status = True
wdesc = "Browser would wait until visibility of an element known to " \
"be present in the DOM is determined"
pNote(wdesc)
pSubStep(wdesc)
browser_details = {}
system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
"system",
"name",
system_name)
browser_list = system.findall("browser")
try:
browser_list.extend(system.find("browsers").findall("browser"))
except AttributeError:
pass
if not browser_list:
browser_list.append(1)
browser_details = arguments
for browser in browser_list:
arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
self.datafile,
browser)
if browser_details == {}:
browser_details = selenium_Utils. \
get_browser_details(browser, datafile=self.datafile, **arguments)
if browser_details is not None:
current_browser = Utils.data_Utils.get_object_from_datarepository(system_name + "_" + browser_details["browser_name"])
if not current_browser:
pNote("Browser of system {0} and name {1} not found in the "
"datarepository"
.format(system_name, browser_details["browser_name"]),
"Exception")
status = False
else:
element = Utils.data_Utils.\
get_object_from_datarepository(system_name + "_" +
browser_details["browser_name"] + "_" +
browser_details["locator_type"] + "=" +
browser_details["locator"])
if element:
status = self.wait_oper_object.\
wait_until_visibilty_is_confirmed(current_browser,
element,
browser_details["timeout"])
else:
element = self.element_locator_obj.\
get_element(current_browser,
browser_details["locator_type"] + "=" +
browser_details["locator"])
status = self.wait_oper_object.\
wait_until_visibilty_is_confirmed(current_browser,
element,
browser_details["timeout"])
browser_details = {}
Utils.testcase_Utils.report_substep_status(status)
if current_browser:
selenium_Utils.save_screenshot_onerror(status, current_browser)
return status
def wait_until_visibility_of_element_located(self, system_name, timeout="5",
locator=None,
locator_type=None,
browser_name="all",
element_tag=None,
element_config_file=None):
"""
This keyword would check whether an element is present on the DOM of a
page and visible.
:Datafile Usage:
Tags or attributes to be used in input datafile for the system or
subsystem. If both tag and attribute is provided the attribute will
be used.
1. system_name = This attribute can be specified in the datafile as
a <system> tag directly under the <credentials>
tag. An attribute "name" has to be added to this
tag and the value of that attribute would be taken
in as value to this keyword attribute.
<system name="name_of_thy_system"/>
2. browser_name = This <browser_name> tag is a child of the
<browser> tag in the data file. Each browser
instance should have a unique name. This name can
be added here
Eg: <browser_name>Unique_name_1</browser_name>
3. timeout = This contains the information of how much time the
browser needs to wait for an element whose existence in
the DOM is unknown to become visible
Eg: <timeout>15</timeout>
4. locator_type = This contains information about the type of
locator that you want to use. Can be 'xpath',
'id', 'css', 'link', 'tag','class', 'name'
5. locator = This contains the value of the locator. Something like
"form", "nav-tags", "//[dh./dhh[yby]"
6. element_config_file = This contains the location of the json
file that contains information about all
the elements that you require for the
testcase execution
7. element_tag = This contains the name of the element in that
element_config_file which you want to use
USING LOCATOR_TYPE, LOCATOR, ELEMENT_CONFIG_FILE, AND ELEMENT_TAG
=================================================================
None of these arguments are mandatory BUT to search an element,
you need to provide Warrior with some way to do it.
a. You can either directly give values for the locator_type and
locator. So if locator_type = name and locator = navigation-bar,
then Warrior can search for an element with name "navigation-bar"
b. You can give location of the element_config_file and a tag inside
it so that Warrior can search for that tag and get the required
information from there.
- Now, if the locator type is given, Warrior
will search for that locator_type in the children of that element in
the element_config_file
- You can also set defaults in the element_config_file, and now,
even if the locator_type is not given, Warrior will know which
element to find. If locator_type is given, the default will be
overridden
- If locator_type is not f=given, and the defaults are not
specified, then the first element in the child list of the element
tag would be picked.
NOTES:
For these four arguments to be given correctly, ONE of the
following conditions must be satisfied.
1. locator_type and locator must be given
2. locator_type, element_config_file, and element_tag must be given
3. element_config_file, and element_tag must be given
The datafile has the first priority, then the json file, and
then finally the testcase.
If all arguments are passed from the same place, then, if
locator and locator_type are given, then they would have
priority. Otherwise, the element_config_file would be searched
The locator_type locator, element_tag can be given the datafile
as children of the <browser> tag, but these values would remain
constant for that browser. It is recommended that these values
be passed from the testcase step.
The element_config_file typically would not change from step to
step, so it can be passed from the data file
:Arguments:
1. system_name(str) = the system name.
2. browser_name(str) = Unique name for this particular browser
3. timeout(str) = amount of time the browser should wait
4. locator_type(str) = type of the locator - xpath, id, etc.
5. locator(str) = locator by which the element should be located.
6. element_config_file(str) = location of the element config file
7. element_tag(str) = json id of the locator that you want to use
from the element config file
:Returns:
1. status(bool)= True / False.
"""
arguments = locals()
arguments.pop('self')
status = True
wdesc = "Browser would wait until visibility of an element known to " \
"be is determined"
pNote(wdesc)
pSubStep(wdesc)
browser_details = {}
system = xml_Utils.getElementWithTagAttribValueMatch(self.datafile,
"system",
"name",
system_name)
browser_list = system.findall("browser")
try:
browser_list.extend(system.find("browsers").findall("browser"))
except AttributeError:
pass
if not browser_list:
browser_list.append(1)
browser_details = arguments
for browser in browser_list:
arguments = Utils.data_Utils.get_default_ecf_and_et(arguments,
self.datafile,
browser)
if browser_details == {}:
browser_details = selenium_Utils. \
get_browser_details(browser, datafile=self.datafile, **arguments)
if browser_details is not None:
current_browser = Utils.data_Utils.get_object_from_datarepository(system_name + "_" + browser_details["browser_name"])
if not current_browser:
pNote("Browser of system {0} and name {1} not found in the "
"datarepository"
.format(system_name, browser_details["browser_name"]),
"Exception")
status = False
else:
status = self.wait_oper_object.\
wait_until_visibility_of_element_located(current_browser,
browser_details["locator_type"],
browser_details["locator"],
browser_details["timeout"])
browser_details = {}
Utils.testcase_Utils.report_substep_status(status)
if current_browser:
selenium_Utils.save_screenshot_onerror(status, current_browser)
return status
| |
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the mixture distribution node.
"""
import warnings
import numpy as np
from bayespy.utils import misc
from .node import Node
from .expfamily import ExponentialFamily, \
ExponentialFamilyDistribution, \
useconstructor
from .categorical import Categorical, \
CategoricalMoments
class MixtureDistribution(ExponentialFamilyDistribution):
    """
    Class for the VMP formulas of mixture variables.

    Wraps another exponential-family distribution and mixes it over the
    plate axis ``cluster_plate`` using the cluster assignment
    probabilities obtained from the first (categorical) parent.
    """

    def __init__(self, distribution, cluster_plate, n_clusters, ndims,
                 ndims_parents):
        """
        Create VMP formula node for a mixture variable.

        Parameters
        ----------
        distribution : ExponentialFamilyDistribution
            The mixed distribution.
        cluster_plate : int
            Negative integer giving the plate axis used for the clusters.
        n_clusters : int
            Number of clusters (size of the cluster plate axis).
        ndims : list of int
            Number of dimensions of each moment of this variable.
        ndims_parents : list of list of int
            Number of dimensions of each moment, for each parent of the
            mixed distribution.
        """
        self.distribution = distribution
        self.cluster_plate = cluster_plate
        self.ndims = ndims
        self.ndims_parents = ndims_parents
        self.K = n_clusters

    def compute_message_to_parent(self, parent, index, u, *u_parents):
        """
        Compute the message to a parent node.

        Index 0 is the cluster-assignment (categorical) parent; indices
        >= 1 are forwarded to the mixed distribution (shifted by one).
        """
        if index == 0:

            # Shape(phi) = [Nn,..,K,..,N0,Dd,..,D0]
            # Shape(L) = [Nn,..,K,..,N0]
            # Shape(u) = [Nn,..,N0,Dd,..,D0]
            # Shape(result) = [Nn,..,N0,K]

            # Compute g:
            # Shape(g) = [Nn,..,K,..,N0]
            g = self.distribution.compute_cgf_from_parents(*(u_parents[1:]))
            # Reshape(g):
            # Shape(g) = [Nn,..,N0,K]
            if np.ndim(g) < abs(self.cluster_plate):
                # Not enough axes, just add the cluster plate axis
                g = np.expand_dims(g, -1)
            else:
                # Move the cluster plate axis
                g = misc.moveaxis(g, self.cluster_plate, -1)

            # Compute phi:
            # Shape(phi) = [Nn,..,K,..,N0,Dd,..,D0]
            phi = self.distribution.compute_phi_from_parents(*(u_parents[1:]))
            # Move phi axis:
            # Shape(phi) = [Nn,..,N0,K,Dd,..,D0]
            for ind in range(len(phi)):
                if self.cluster_plate < 0:
                    axis_from = self.cluster_plate-self.ndims[ind]
                else:
                    raise RuntimeError("Cluster plate axis must be negative")
                axis_to = -1-self.ndims[ind]
                if np.ndim(phi[ind]) >= abs(axis_from):
                    # Cluster plate axis exists, move it to the correct position
                    phi[ind] = misc.moveaxis(phi[ind], axis_from, axis_to)
                else:
                    # No cluster plate axis, just add a new axis to the correct
                    # position, if phi has something on that axis
                    if np.ndim(phi[ind]) >= abs(axis_to):
                        phi[ind] = np.expand_dims(phi[ind], axis=axis_to)

            # Reshape u:
            # Shape(u) = [Nn,..,N0,1,Dd,..,D0]
            u_self = list()
            for ind in range(len(u)):
                u_self.append(np.expand_dims(u[ind],
                                             axis=(-1-self.ndims[ind])))

            # Compute logpdf:
            # Shape(L) = [Nn,..,N0,K]
            L = self.distribution.compute_logpdf(u_self, phi, g, 0, self.ndims)

            # No need to sum over the non-cluster dimensions here; the
            # message passing machinery does that automatically.
            m = [L]

            return m

        elif index >= 1:

            # Parent index for the distribution used for the mixture.
            index_for_parent = index - 1

            # Reshape u:
            # Shape(u) = [Nn,..1,..,N0,Dd,..,D0]
            u_self = list()
            for ind in range(len(u)):
                if self.cluster_plate < 0:
                    cluster_axis = self.cluster_plate - self.ndims[ind]
                else:
                    raise ValueError("Cluster plate axis must be negative")
                u_self.append(np.expand_dims(u[ind], axis=cluster_axis))

            # Message from the mixed distribution
            m = self.distribution.compute_message_to_parent(parent,
                                                            index_for_parent,
                                                            u_self,
                                                            *(u_parents[1:]))

            # Note: The cluster assignment probabilities can be considered as
            # weights to plate elements. These weights need to be mapped
            # properly via the plate mapping of self.distribution. Otherwise,
            # nested mixtures won't work, or possibly not any distribution that
            # does something to the plates. Thus, use compute_weights_to_parent
            # to compute the transformations to the weight array properly.
            #
            # See issue #39 for more details.

            # Compute weights (i.e., cluster assignment probabilities) and map
            # the plates properly.
            p = misc.atleast_nd(u_parents[0][0], abs(self.cluster_plate))
            p = misc.moveaxis(p, -1, self.cluster_plate)
            p = self.distribution.compute_weights_to_parent(
                index_for_parent,
                p,
            )

            # Weigh the elements in the message array
            #
            # TODO/FIXME: This may result in huge intermediate arrays. Need to
            # use einsum!
            m = [mi * misc.add_trailing_axes(p, ndim)
                 for (mi, ndim) in zip(m, self.ndims_parents[index_for_parent])]

            return m

    def compute_weights_to_parent(self, index, weights):
        """
        Maps the mask to the plates of a parent.
        """
        if index == 0:
            return weights
        else:
            if self.cluster_plate >= 0:
                raise ValueError("Cluster plate axis must be negative")
            # Insert the cluster plate axis before delegating, so the mixed
            # distribution sees weights with its own plate layout.
            if np.ndim(weights) >= abs(self.cluster_plate):
                weights = np.expand_dims(weights, axis=self.cluster_plate)
            return self.distribution.compute_weights_to_parent(
                index-1,
                weights
            )

    def compute_phi_from_parents(self, *u_parents, mask=True):
        """
        Compute the natural parameter vector given parent moments.
        """

        # Compute weighted average of the parameters

        # Cluster parameters
        Phi = self.distribution.compute_phi_from_parents(*(u_parents[1:]))
        # Contributions/weights/probabilities
        P = u_parents[0][0]

        phi = list()

        nans = False

        for ind in range(len(Phi)):
            # Compute element-wise product and then sum over K clusters.
            # Note that the dimensions aren't perfectly aligned because
            # the cluster dimension (K) may be arbitrary for phi, and phi
            # also has dimensions (Dd,..,D0) of the parameters.
            # Shape(phi) = [Nn,..,K,..,N0,Dd,..,D0]
            # Shape(p) = [Nn,..,N0,K]
            # Shape(result) = [Nn,..,N0,Dd,..,D0]
            # General broadcasting rules apply for Nn,..,N0, that is,
            # preceding dimensions may be missing or dimension may be
            # equal to one. Probably, shape(phi) has lots of missing
            # dimensions and/or dimensions that are one.

            if self.cluster_plate < 0:
                cluster_axis = self.cluster_plate - self.ndims[ind]
            else:
                raise RuntimeError("Cluster plate should be negative")

            # Move cluster axis to the last:
            # Shape(phi) = [Nn,..,N0,Dd,..,D0,K]
            if np.ndim(Phi[ind]) >= abs(cluster_axis):
                phi.append(misc.moveaxis(Phi[ind], cluster_axis, -1))
            else:
                phi.append(Phi[ind][...,None])

            # Add axes to p:
            # Shape(p) = [Nn,..,N0,K,1,..,1]
            p = misc.add_trailing_axes(P, self.ndims[ind])
            # Move cluster axis to the last:
            # Shape(p) = [Nn,..,N0,1,..,1,K]
            p = misc.moveaxis(p, -(self.ndims[ind]+1), -1)

            # Handle zero probability cases. This avoids nans when p=0 and
            # phi=inf.
            phi[ind] = np.where(p != 0, phi[ind], 0)

            # Now the shapes broadcast perfectly and we can sum
            # p*phi over the last axis:
            # Shape(result) = [Nn,..,N0,Dd,..,D0]
            phi[ind] = misc.sum_product(p, phi[ind], axes_to_sum=-1)

            if np.any(np.isnan(phi[ind])):
                nans = True

        if nans:
            warnings.warn("The natural parameters of mixture distribution "
                          "contain nans. This may happen if you use fixed "
                          "parameters in your model. Technically, one possible "
                          "reason is that the cluster assignment probability "
                          "for some element is zero (p=0) and the natural "
                          "parameter of that cluster is -inf, thus "
                          "0*(-inf)=nan. Solution: Use parameters that assign "
                          "non-zero probabilities for the whole domain.")

        return phi

    def compute_moments_and_cgf(self, phi, mask=True):
        r"""
        Compute the moments and :math:`g(\phi)`.
        """
        return self.distribution.compute_moments_and_cgf(phi, mask=mask)

    def compute_cgf_from_parents(self, *u_parents):
        r"""
        Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
        """

        # Compute weighted average of g over the clusters.

        # Shape(g) = [Nn,..,K,..,N0]
        # Shape(p) = [Nn,..,N0,K]
        # Shape(result) = [Nn,..,N0]

        # Compute g for clusters:
        # Shape(g) = [Nn,..,K,..,N0]
        g = self.distribution.compute_cgf_from_parents(*(u_parents[1:]))

        # Move cluster axis to last:
        # Shape(g) = [Nn,..,N0,K]
        if np.ndim(g) < abs(self.cluster_plate):
            # Not enough axes, just add the cluster plate axis
            g = np.expand_dims(g, -1)
        else:
            # Move the cluster plate axis
            g = misc.moveaxis(g, self.cluster_plate, -1)

        # Cluster assignments/contributions/probabilities/weights:
        # Shape(p) = [Nn,..,N0,K]
        p = u_parents[0][0]

        # Weighted average of g over the clusters. As p and g are
        # properly aligned, you can just sum p*g over the last
        # axis and utilize broadcasting:
        # Shape(result) = [Nn,..,N0]
        g = misc.sum_product(p, g, axes_to_sum=-1)

        return g

    def compute_fixed_moments_and_f(self, x, mask=True):
        r"""
        Compute the moments and :math:`f(x)` for a fixed value.
        """
        # BUGFIX: forward the caller's mask instead of hard-coding True.
        return self.distribution.compute_fixed_moments_and_f(x, mask=mask)

    def plates_to_parent(self, index, plates):
        """
        Resolves the plate mapping to a parent.

        Given the plates of the node's moments, this method returns the plates
        that the message to a parent has for the parent's distribution.
        """
        if index == 0:
            return plates
        else:
            # Add the cluster plate axis
            plates = list(plates)
            if self.cluster_plate < 0:
                knd = len(plates) + self.cluster_plate + 1
            else:
                raise RuntimeError("Cluster plate axis must be negative")
            plates.insert(knd, self.K)
            plates = tuple(plates)

            return self.distribution.plates_to_parent(index-1, plates)

    def plates_from_parent(self, index, plates):
        """
        Resolve the plate mapping from a parent.

        Given the plates of a parent's moments, this method returns the plates
        that the moments has for this distribution.
        """
        if index == 0:
            return plates
        else:
            plates = self.distribution.plates_from_parent(index-1, plates)

            # Remove the cluster plate, if the parent has it
            plates = list(plates)
            if len(plates) >= abs(self.cluster_plate):
                plates.pop(self.cluster_plate)
            return tuple(plates)

    def random(self, *phi, plates=None):
        """
        Draw a random sample from the distribution.
        """
        return self.distribution.random(*phi, plates=plates)

    def compute_gradient(self, g, u, phi):
        r"""
        Compute the standard gradient with respect to the natural parameters.
        """
        return self.distribution.compute_gradient(g, u, phi)
class Mixture(ExponentialFamily):
    r"""
    Node for exponential family mixture variables.

    The node represents a random variable which is sampled from a
    mixture distribution. It is possible to mix any exponential family
    distribution. The probability density function is

    .. math::

        p(x|z=k,\boldsymbol{\theta}_0,\ldots,\boldsymbol{\theta}_{K-1})
        = \phi(x|\boldsymbol{\theta}_k),

    where :math:`\phi` is the probability density function of the mixed
    exponential family distribution and :math:`\boldsymbol{\theta}_0,
    \ldots, \boldsymbol{\theta}_{K-1}` are the parameters of each
    cluster. For instance, :math:`\phi` could be the Gaussian
    probability density function :math:`\mathcal{N}` and
    :math:`\boldsymbol{\theta}_k = \{\boldsymbol{\mu}_k,
    \mathbf{\Lambda}_k\}` where :math:`\boldsymbol{\mu}_k` and
    :math:`\mathbf{\Lambda}_k` are the mean vector and precision matrix
    for cluster :math:`k`.

    Parameters
    ----------

    z : categorical-like node or array
        :math:`z`, cluster assignment

    node_class : stochastic exponential family node class
        Mixed distribution

    params : types specified by the mixed distribution
        Parameters of the mixed distribution. If some parameters should
        vary between clusters, those parameters' plate axis
        `cluster_plate` should have a size which equals the number of
        clusters. For parameters with shared values, that plate axis
        should have length 1. At least one parameter should vary between
        clusters.

    cluster_plate : int, optional
        Negative integer defining which plate axis is used for the
        clusters in the parameters. That plate axis is ignored from the
        parameters when considering the plates for this node. By
        default, mix over the last plate axis.

    See also
    --------

    Categorical, CategoricalMarkovChain

    Examples
    --------

    A simple 2-dimensional Gaussian mixture model with three clusters
    for 100 samples can be constructed, for instance, as:

    >>> import numpy as np
    >>> from bayespy.nodes import (Dirichlet, Categorical, Mixture,
    ...                            Gaussian, Wishart)
    >>> alpha = Dirichlet([1e-3, 1e-3, 1e-3])
    >>> Z = Categorical(alpha, plates=(100,))
    >>> mu = Gaussian(np.zeros(2), 1e-6*np.identity(2), plates=(3,))
    >>> Lambda = Wishart(2, 1e-6*np.identity(2), plates=(3,))
    >>> X = Mixture(Z, Gaussian, mu, Lambda)
    """

    def __init__(self, z, node_class, *params, cluster_plate=-1, **kwargs):
        # Remember the cluster plate axis; _constructor receives the same
        # value via the keyword argument forwarded by the base class.
        self.cluster_plate = cluster_plate
        super().__init__(z, node_class, *params, cluster_plate=cluster_plate,
                         **kwargs)

    @classmethod
    def _constructor(cls, z, node_class, *args, cluster_plate=-1, **kwargs):
        """
        Constructs distribution and moments objects.

        Builds the mixed distribution from ``node_class``, wraps it in a
        :class:`MixtureDistribution`, and prepends the categorical
        cluster-assignment node ``z`` to the parent list.
        """
        if cluster_plate >= 0:
            raise ValueError("Cluster plate axis must be negative")

        # Get the stuff for the mixed distribution
        (parents, _, dims, mixture_plates, distribution, moments, parent_moments) = \
            node_class._constructor(*args)

        # Check that at least one of the parents has the cluster plate axis
        if len(mixture_plates) < abs(cluster_plate):
            raise ValueError("The mixed distribution does not have a plates "
                             "axis for the cluster plate axis")

        # Resolve the number of clusters
        mixture_plates = list(mixture_plates)
        K = mixture_plates.pop(cluster_plate)

        # Convert a node to get the number of clusters
        z = cls._ensure_moments(z, CategoricalMoments, categories=K)
        if z.dims[0][0] != K:
            raise ValueError("Inconsistent number of clusters")

        plates = cls._total_plates(kwargs.get('plates'), mixture_plates, z.plates)

        ndims = [len(dim) for dim in dims]

        # Normalize each parent to the moments class its slot expects.
        parents = [cls._ensure_moments(p_i, m_i.__class__, **m_i.get_instance_conversion_kwargs())
                   for (p_i, m_i) in zip(parents, parent_moments)]

        ndims_parents = [[len(dims_i) for dims_i in parent.dims]
                         for parent in parents]

        # Convert the distribution to a mixture
        distribution = MixtureDistribution(distribution,
                                           cluster_plate,
                                           K,
                                           ndims,
                                           ndims_parents)

        # Add cluster assignments to parents
        parent_moments = [CategoricalMoments(K)] + list(parent_moments)
        parents = [z] + list(parents)

        return (parents,
                kwargs,
                dims,
                plates,
                distribution,
                moments,
                parent_moments)

    def integrated_logpdf_from_parents(self, x, index):
        r""" Approximates the posterior predictive pdf \int p(x|parents)
        q(parents) dparents in log-scale as \int q(parents_i) exp( \int
        q(parents_\i) \log p(x|parents) dparents_\i ) dparents_i."""

        if index == 0:
            # Integrate out the cluster assignments

            # First, integrate the cluster parameters in log-scale

            # compute_logpdf(cls, u, phi, g, f):

            # Shape(x) = [M1,..,Mm,N1,..,Nn,D1,..,Dd]

            u_parents = self._message_from_parents()

            # Shape(u) = [M1,..,Mm,N1,..,1,..,Nn,D1,..,Dd]
            # Shape(f) = [M1,..,Mm,N1,..,1,..,Nn]
            (u, f) = self._distribution.distribution.compute_fixed_moments_and_f(x)
            f = np.expand_dims(f, axis=self.cluster_plate)
            for i in range(len(u)):
                ndim_i = len(self.dims[i])
                cluster_axis = self.cluster_plate - ndim_i
                u[i] = np.expand_dims(u[i], axis=cluster_axis)
            # Shape(phi) = [N1,..,K,..,Nn,D1,..,Dd]
            phi = self._distribution.distribution.compute_phi_from_parents(*(u_parents[1:]))
            # Shape(g) = [N1,..,K,..,Nn]
            g = self._distribution.distribution.compute_cgf_from_parents(*(u_parents[1:]))
            # Shape(lpdf) = [M1,..,Mm,N1,..,K,..,Nn]
            # NOTE(review): relies on self.ndims being available on the node —
            # presumably set by the base class; confirm.
            lpdf = self._distribution.distribution.compute_logpdf(u, phi, g, f, self.ndims)

            # From logpdf to pdf, but avoid over/underflow by shifting by the
            # per-cluster maximum before exponentiating.
            lpdf_max = np.max(lpdf, axis=self.cluster_plate, keepdims=True)
            pdf = np.exp(lpdf-lpdf_max)

            # Move cluster axis to be the last:
            # Shape(pdf) = [M1,..,Mm,N1,..,Nn,K]
            pdf = misc.moveaxis(pdf, self.cluster_plate, -1)

            # Cluster assignments/probabilities/weights
            # Shape(p) = [N1,..,Nn,K]
            p = u_parents[0][0]

            # Weighted average. TODO/FIXME: Use einsum!
            # Shape(pdf) = [M1,..,Mm,N1,..,Nn]
            pdf = np.sum(pdf * p, axis=self.cluster_plate)

            # Back to log-scale (add the overflow fix!)
            lpdf_max = np.squeeze(lpdf_max, axis=self.cluster_plate)
            lpdf = np.log(pdf) + lpdf_max

            return lpdf

        raise NotImplementedError()
def MultiMixture(thetas, *mixture_args, **kwargs):
    """Creates a mixture over several axes using as many categorical variables.

    The mixings are assumed to be separate, that is, inner mixings don't affect
    the parameters of outer mixings.
    """
    # Convert raw arrays to ndarrays, then append one trailing unit plate
    # axis per preceding mixing so that each mixed axis is separate from
    # the others.
    expanded = []
    for depth, theta in enumerate(thetas):
        if not isinstance(theta, Node):
            theta = np.asanyarray(theta)
        expanded.append(theta[(Ellipsis,) + depth * (None,)])

    count = len(expanded)
    # Interleave a nested Mixture marker before each inner theta:
    # [theta0, Mixture, theta1, Mixture, theta2, ...]
    nested = list(misc.zipper_merge((count - 1) * [Mixture], expanded[1:]))
    call_args = expanded[:1] + nested + list(mixture_args)
    return Mixture(*call_args, **kwargs)
| |
"""Tests for the implementation of RootOf class and related tools."""
import pytest
from diofant import (Eq, Float, Function, GeneratorsNeeded, I, Lambda,
MultivariatePolynomialError, Poly, PolynomialError, Pow,
PurePoly, Rational, RootOf, RootSum, Symbol, conjugate,
exp, expand_func, false, legendre_poly, log, oo, root,
solve, sqrt, tan, true)
from diofant.abc import a, b, r, x, y, z
__all__ = ()
def test_RootOf___new__():
    """Construction, canonicalization and validation of RootOf objects."""
    # Linear polynomials evaluate immediately to their single root;
    # index -1 refers to the same (last == only) root.
    assert RootOf(x, 0) == 0
    assert RootOf(x, -1) == 0
    assert RootOf(x - 1, 0) == 1
    assert RootOf(x - 1, -1) == 1
    assert RootOf(x + 1, 0) == -1
    assert RootOf(x + 1, -1) == -1

    # Quadratics collapse to radical expressions by default; negative
    # indices count from the end of the root list.
    assert RootOf(x**2 + 2*x + 3, 0) == -1 - I*sqrt(2)
    assert RootOf(x**2 + 2*x + 3, 1) == -1 + I*sqrt(2)
    assert RootOf(x**2 + 2*x + 3, -1) == -1 + I*sqrt(2)
    assert RootOf(x**2 + 2*x + 3, -2) == -1 - I*sqrt(2)

    # With radicals=False the quadratic roots stay unevaluated RootOf.
    r = RootOf(x**2 + 2*x + 3, 0, radicals=False)
    assert isinstance(r, RootOf) is True

    r = RootOf(x**2 + 2*x + 3, 1, radicals=False)
    assert isinstance(r, RootOf) is True

    r = RootOf(x**2 + 2*x + 3, -1, radicals=False)
    assert isinstance(r, RootOf) is True

    r = RootOf(x**2 + 2*x + 3, -2, radicals=False)
    assert isinstance(r, RootOf) is True

    # Rational roots of factorable polynomials are returned explicitly,
    # regardless of the radicals flag.
    assert RootOf((x - 1)*(x + 1), 0, radicals=False) == -1
    assert RootOf((x - 1)*(x + 1), 1, radicals=False) == 1
    assert RootOf((x - 1)*(x + 1), -1, radicals=False) == 1
    assert RootOf((x - 1)*(x + 1), -2, radicals=False) == -1

    assert RootOf((x - 1)*(x + 1), 0, radicals=True) == -1
    assert RootOf((x - 1)*(x + 1), 1, radicals=True) == 1
    assert RootOf((x - 1)*(x + 1), -1, radicals=True) == 1
    assert RootOf((x - 1)*(x + 1), -2, radicals=True) == -1

    # Mixed rational/irrational factors: the index is over all roots,
    # and irrational ones map to RootOf of the irreducible factor.
    assert RootOf((x - 1)*(x**3 + x + 3), 0) == RootOf(x**3 + x + 3, 0)
    assert RootOf((x - 1)*(x**3 + x + 3), 1) == 1
    assert RootOf((x - 1)*(x**3 + x + 3), 2) == RootOf(x**3 + x + 3, 1)
    assert RootOf((x - 1)*(x**3 + x + 3), 3) == RootOf(x**3 + x + 3, 2)
    assert RootOf((x - 1)*(x**3 + x + 3), -1) == RootOf(x**3 + x + 3, 2)
    assert RootOf((x - 1)*(x**3 + x + 3), -2) == RootOf(x**3 + x + 3, 1)
    assert RootOf((x - 1)*(x**3 + x + 3), -3) == 1
    assert RootOf((x - 1)*(x**3 + x + 3), -4) == RootOf(x**3 + x + 3, 0)

    # Repeated roots (here x=0 with multiplicity 3) are indexed separately.
    assert RootOf(x**4 + 3*x**3, 0) == -3
    assert RootOf(x**4 + 3*x**3, 1) == 0
    assert RootOf(x**4 + 3*x**3, 2) == 0
    assert RootOf(x**4 + 3*x**3, 3) == 0

    # Invalid inputs: constants, zero/unit polynomials, multivariate
    # expressions, out-of-range indices, non-integer indices.
    pytest.raises(GeneratorsNeeded, lambda: RootOf(0, 0))
    pytest.raises(GeneratorsNeeded, lambda: RootOf(1, 0))

    pytest.raises(PolynomialError, lambda: RootOf(Poly(0, x), 0))
    pytest.raises(PolynomialError, lambda: RootOf(Poly(1, x), 0))

    pytest.raises(PolynomialError, lambda: RootOf(x - y, 0))

    pytest.raises(IndexError, lambda: RootOf(x**2 - 1, -4))
    pytest.raises(IndexError, lambda: RootOf(x**2 - 1, -3))
    pytest.raises(IndexError, lambda: RootOf(x**2 - 1, 2))
    pytest.raises(IndexError, lambda: RootOf(x**2 - 1, 3))
    pytest.raises(ValueError, lambda: RootOf(x**2 - 1, x))

    # Leading coefficient that may be zero cannot be handled.
    pytest.raises(NotImplementedError,
                  lambda: RootOf(Symbol('a', nonzero=False)*x**5 +
                                 2*x - 1, x, 0))
    pytest.raises(NotImplementedError,
                  lambda: Poly(Symbol('a', nonzero=False)*x**5 +
                               2*x - 1, x).all_roots())

    # Polynomials with symbolic coefficients in the chosen generator.
    assert RootOf(Poly(x - y, x), 0) == y

    assert RootOf(Poly(x**2 - y, x), 0) == -sqrt(y)
    assert RootOf(Poly(x**2 - y, x), 1) == sqrt(y)

    assert isinstance(RootOf(x**3 - y, x, 0), RootOf)
    p = Symbol('p', positive=True)
    assert RootOf(x**3 - p, x, 0) == root(p, 3)*RootOf(x**3 - 1, 0)

    # A common nonzero symbolic factor is cancelled.
    assert RootOf(y*x**3 + y*x + 2*y, x, 0) == -1

    assert RootOf(x**3 + x + 1, 0).is_commutative is True

    # evaluate=False keeps the RootOf unevaluated until doit().
    e = RootOf(x**2 - 4, x, 1, evaluate=False)
    assert isinstance(e, RootOf)
    assert e.doit() == 2
    assert e.args == (x**2 - 4, x, 1)
    assert e.poly == PurePoly(x**2 - 4, x)
    assert e.index == 1

    # Float coefficients are cleared to integers before construction.
    assert RootOf(x**7 - 0.1*x + 1, 0) == RootOf(10*x**7 - x + 10, 0)
def test_RootOf_attributes():
    """Number/free-symbol attributes of concrete and parametric roots."""
    numeric_root = RootOf(x**3 + x + 3, 0)
    assert numeric_root.is_number
    assert numeric_root.free_symbols == set()
    parametric_root = RootOf(x**3 + y*x + 1, x, 0)
    assert isinstance(parametric_root, RootOf)
    assert parametric_root.expr == x**3 + y*x + 1
    assert parametric_root.free_symbols == {y}
    assert parametric_root.is_number is False
def test_RootOf___eq__():
    """RootOf equality depends on the polynomial (up to generator name) and index."""
    f = x**3 + x + 3
    g = y**3 + y + 3
    for i in range(3):
        # same index -> equal, whatever the generator is called
        assert (RootOf(f, i) == RootOf(f, i)) is True
        assert (RootOf(f, i) == RootOf(g, i)) is True
    # mismatched indices never compare equal
    assert (RootOf(f, 0) == RootOf(f, 1)) is False
    assert (RootOf(f, 1) == RootOf(f, 2)) is False
    assert (RootOf(f, 0) == RootOf(g, 1)) is False
    assert (RootOf(f, 1) == RootOf(g, 2)) is False
def test_RootOf___eval_Eq__():
    """Eq() between a RootOf and other expressions evaluates symbolically.

    A RootOf equals only itself (same poly, same index); symbols, numbers
    and non-matching solved roots compare false.
    """
    f = Function('f')
    r = RootOf(x**3 + x + 3, 2)
    r1 = RootOf(x**3 + x + 3, 1)
    assert Eq(r, r1) is false
    assert Eq(r, r) is true
    assert Eq(r, x) is false
    assert Eq(r, 0) is false
    assert Eq(r, oo) is false
    assert Eq(r, I) is false
    # an unevaluated applied function is known not to be this root
    # (the original test asserted this twice; the duplicate was removed)
    assert Eq(r, f(0)) is false
    # r (index 2) is not real, so it matches no real solution of its poly
    sol = solve(r.expr, x)
    for s in sol:
        if s[x].is_real:
            assert Eq(r, s[x]) is false
    # index 0 is the real root and does match the real solution
    r = RootOf(r.expr, 0)
    for s in sol:
        if s[x].is_real:
            assert Eq(r, s[x]) is true
    eq = x**3 + x + 1
    assert ([Eq(RootOf(eq, i), j[x])
             for i in range(3) for j in solve(eq)] ==
            [False, False, True, False, True, False, True, False, False])
    assert Eq(RootOf(eq, 0), 1 + I) is false
def test_RootOf_is_real():
    """Only index 0 of x**3 + x + 3 is real; the parametric case is unknown."""
    cubic = x**3 + x + 3
    assert RootOf(cubic, 0).is_real is True
    assert RootOf(cubic, 1).is_real is False
    assert RootOf(cubic, 2).is_real is False
    parametric = RootOf(x**3 + y*x + 1, x, 0)
    assert parametric.is_real is None
    assert RootOf(x**3 + I*x + 2, 0).is_real is False
def test_RootOf_is_imaginary():
    """is_imaginary classification for concrete and parametric roots."""
    assert RootOf(x**3 + x + 3, 0).is_imaginary is False
    assert RootOf(x**3 + x + 3, 1).is_imaginary is False
    # with free parameter y the classification is undecidable
    assert RootOf(x**3 + y*x + 1, x, 0).is_imaginary is None
    # NOTE(review): this line repeats the is_real check already made in
    # test_RootOf_is_real; possibly .is_imaginary was intended here — confirm.
    assert RootOf(x**3 + I*x + 2, 0).is_real is False
    assert RootOf(x**4 + 10*x**2 + 1, 2).is_imaginary is True
def test_RootOf_is_complex():
    """Concrete roots are complex numbers; parametric ones are undetermined."""
    assert RootOf(x**3 + x + 3, 0).is_complex is True
    parametric = RootOf(x**3 + y*x + 3, x, 0)
    assert parametric.is_complex is None
    assert parametric.is_commutative
    assert RootOf(x**3 + I*x + 2, 0).is_complex is True
def test_RootOf_is_algebraic():
    """A numeric RootOf is algebraic; a parametric one is indeterminate."""
    concrete = RootOf(x**3 + x + 3, 0)
    assert concrete.is_algebraic is True
    parametric = RootOf(x**3 + y*x + 3, x, 0)
    assert parametric.is_algebraic is None
def test_RootOf_power():
    """Powers of a RootOf reduce via its defining polynomial when exact."""
    e = RootOf(y**3 - x, y, 0)
    # e**3 == x follows directly from y**3 - x = 0
    assert e**3 == x
    assert e**2 == Pow(e, 2, evaluate=False)
    # no exact reduction applies here, so the power stays unevaluated
    e2 = RootOf(y**3 - x*y, y, 0)
    assert e2**3 == Pow(e2, 3, evaluate=False)
    e3 = RootOf(3*x**5 + 2*x - 1, 0)
    assert e3**5 == -2*e3/3 + Rational(1, 3)  # issue sympy/sympy#8543
    assert e3**4 == Pow(e3, 4, evaluate=False)
    # inverse via 3*e3**5 + 2*e3 - 1 = 0  =>  1/e3 == 3*e3**4 + 2
    assert e3**-1 == 3*e3**4 + 2
def test_RootOf_conjugate():
    """Conjugation fixes real roots and swaps complex-conjugate pairs."""
    p = x**7 + x + 1
    assert RootOf(p, 0).conjugate() == RootOf(p, 0)
    assert RootOf(p, 1).conjugate() == RootOf(p, 2)
    assert RootOf(p, 2).conjugate() == RootOf(p, 1)
    assert RootOf(p, 6).conjugate() == RootOf(p, 5)
    # an extra real linear factor shifts the indexing of the complex pairs
    p2 = p*(x - 123)
    assert RootOf(p2, 0).conjugate() == RootOf(p2, 0)
    assert RootOf(p2, 1).conjugate() == RootOf(p2, 1)
    assert RootOf(p2, 2).conjugate() == RootOf(p2, 3)
    assert RootOf(p2, 3).conjugate() == RootOf(p2, 2)
    assert RootOf(p2, 7).conjugate() == RootOf(p2, 6)
    # a parametric root stays as an unevaluated conjugate()
    p3 = Poly(x**7 + x*y + 1, x)
    assert RootOf(p3, x, 0).conjugate() == conjugate(RootOf(p3, x, 0),
                                                     evaluate=False)
    p4 = x**12 - 4*x**8 + 2*x**6 + 4*x**4 + 4*x**2 + 1
    r4 = RootOf(p4, 4)
    r5 = RootOf(p4, 5)
    assert r4.conjugate() == r5
    # numerically these two roots are negatives of each other
    assert r4.evalf() == -r5.evalf()
def test_RootOf_subs():
    """Substitution renames the generator and evaluates specializations."""
    renamed = RootOf(x**3 + x + 1, 0).subs({x: y})
    assert renamed == RootOf(y**3 + y + 1, 0)
    # the combined expression vanishes identically at x = 1/3
    expr = -x + RootOf(y**3 - x**3 + 3*x**2, y, 0) + 1
    assert expr.subs({x: Rational(1, 3)}) == 0
def test_RootOf_diff():
    """Numeric roots differentiate to zero; parametric roots implicitly."""
    const_root = RootOf(x**3 + x + 1, 0)
    assert const_root.diff(x) == 0
    assert const_root.diff(y) == 0
    param_root = RootOf(x**7 + x*y + 1, x, 0)
    assert param_root.diff(y) == -param_root/(y + 7*param_root**6)
    assert param_root.diff(x) == 0
def test_RootOf_evalf():
    """Numeric evaluation of RootOf: real/complex roots, precision, chopping."""
    real = RootOf(x**3 + x + 3, 0).evalf(20)
    assert real.epsilon_eq(Float('-1.2134116627622296341'))
    re, im = RootOf(x**3 + x + 3, 1).evalf(20).as_real_imag()
    assert re.epsilon_eq(+Float('0.60670583138111481707'))
    assert im.epsilon_eq(-Float('1.45061224918844152650'))
    re, im = RootOf(x**3 + x + 3, 2).evalf(20).as_real_imag()
    assert re.epsilon_eq(Float('0.60670583138111481707'))
    assert im.epsilon_eq(Float('1.45061224918844152650'))
    # real roots of the degree-4 Legendre polynomial
    p = legendre_poly(4, x, polys=True)
    roots = [str(r.evalf(17)) for r in p.real_roots()]
    assert roots == [
        '-0.86113631159405258',
        '-0.33998104358485626',
        '0.33998104358485626',
        '0.86113631159405258',
    ]
    re = RootOf(x**5 - 5*x + 12, 0).evalf(20)
    assert re.epsilon_eq(Float('-1.84208596619025438271'))
    re, im = RootOf(x**5 - 5*x + 12, 1).evalf(20).as_real_imag()
    assert re.epsilon_eq(Float('-0.351854240827371999559'))
    assert im.epsilon_eq(Float('-1.709561043370328882010'))
    re, im = RootOf(x**5 - 5*x + 12, 2).evalf(20).as_real_imag()
    assert re.epsilon_eq(Float('-0.351854240827371999559'))
    assert im.epsilon_eq(Float('+1.709561043370328882010'))
    re, im = RootOf(x**5 - 5*x + 12, 3).evalf(20).as_real_imag()
    assert re.epsilon_eq(Float('+1.272897223922499190910'))
    assert im.epsilon_eq(Float('-0.719798681483861386681'))
    re, im = RootOf(x**5 - 5*x + 12, 4).evalf(20).as_real_imag()
    assert re.epsilon_eq(Float('+1.272897223922499190910'))
    assert im.epsilon_eq(Float('+0.719798681483861386681'))
    # issue sympy/sympy#6393
    assert str(RootOf(x**5 + 2*x**4 + x**3 - 68719476736, 0).evalf(3)) == '147.'
    eq = (531441*x**11 + 3857868*x**10 + 13730229*x**9 + 32597882*x**8 +
          55077472*x**7 + 60452000*x**6 + 32172064*x**5 - 4383808*x**4 -
          11942912*x**3 - 1506304*x**2 + 1453312*x + 512)
    # a conjugate pair: same real part, opposite imaginary parts
    a, b = RootOf(eq, 1).evalf(2).as_real_imag()
    c, d = RootOf(eq, 2).evalf(2).as_real_imag()
    assert a == c
    assert b < d
    assert b == -d
    # issue sympy/sympy#6451: result is independent of intermediate precision
    r = RootOf(legendre_poly(64, x), 7)
    assert r.evalf(2) == r.evalf(100).evalf(2)
    # issue sympy/sympy#8617
    ans = [w[x].evalf(2) for w in solve(x**3 - x - 4)]
    assert RootOf(exp(x)**3 - exp(x) - 4, 0).evalf(2) in ans
    # issue sympy/sympy#9019
    r0 = RootOf(x**2 + 1, 0, radicals=False)
    r1 = RootOf(x**2 + 1, 1, radicals=False)
    assert r0.evalf(4, chop=True) == -1.0*I
    assert r1.evalf(4, chop=True) == +1.0*I
    # make sure verification is used in case a max/min traps the "root"
    assert str(RootOf(4*x**5 + 16*x**3 + 12*x**2 + 7, 0).evalf(3)) == '-0.976'
    # a parametric root cannot evaluate to a number
    assert isinstance(RootOf(x**3 + y*x + 1, x, 0).evalf(2), RootOf)
    assert RootOf(x**3 + I*x + 2, 0).evalf(7) == (Float('-1.260785326', dps=7) +
                                                  I*Float('0.2684419416', dps=7))
    r = RootOf(x**2 - 4456178*x + 60372201703370, 0, radicals=False)
    assert r.evalf(2) == Float('2.2282e+6', dps=2) - I*Float('7.4465e+6', dps=2)
def test_RootOf_evalf_caching_bug():
    """Independent instances of the same root end with the same interval."""
    intervals = []
    for _ in range(2):
        root = RootOf(x**5 - 5*x + 12, 1)
        root.evalf()
        intervals.append(root.interval)
    assert intervals[0] == intervals[1]
def test_RootOf_real_roots():
    """Poly.real_roots() returns only the real roots as RootOf instances."""
    expected = [RootOf(x**3 - x**2 + 1, 0)]
    assert Poly(x**5 + x + 1).real_roots() == expected
    assert Poly(x**5 + x + 1).real_roots(radicals=False) == expected
    # float coefficients are cleared to integers first
    assert Poly(x**7 - 0.1*x + 1, x).real_roots() == [RootOf(10*x**7 - x + 10, 0)]
def test_RootOf_all_roots():
    """Poly.all_roots() lists every complex root, radical form optional."""
    # by default quadratic factors are expressed in radicals
    assert Poly(x**5 + x + 1).all_roots() == [
        RootOf(x**3 - x**2 + 1, 0),
        -Rational(1, 2) - sqrt(3)*I/2,
        -Rational(1, 2) + sqrt(3)*I/2,
        RootOf(x**3 - x**2 + 1, 1),
        RootOf(x**3 - x**2 + 1, 2),
    ]
    # radicals=False keeps even quadratic roots as RootOf instances
    assert Poly(x**5 + x + 1).all_roots(radicals=False) == [
        RootOf(x**3 - x**2 + 1, 0),
        RootOf(x**2 + x + 1, 0, radicals=False),
        RootOf(x**2 + x + 1, 1, radicals=False),
        RootOf(x**3 - x**2 + 1, 1),
        RootOf(x**3 - x**2 + 1, 2),
    ]
    # real roots come first, then the complex ones
    r = Poly((x**3 + x + 20)*(x**3 + x + 21)).all_roots()
    assert r[0].is_real and r[1].is_real
    assert all(not _.is_real for _ in r[2:])
    assert r == [RootOf(x**3 + x + 21, 0), RootOf(x**3 + x + 20, 0),
                 RootOf(x**3 + x + 20, 1), RootOf(x**3 + x + 20, 2),
                 RootOf(x**3 + x + 21, 1), RootOf(x**3 + x + 21, 2)]
def test_RootOf_eval_rational():
    """eval_rational returns a Rational within the requested tolerance."""
    p = legendre_poly(4, x, polys=True)
    roots = [r.eval_rational(Rational(1, 10)**20) for r in p.real_roots()]
    for r in roots:
        assert isinstance(r, Rational)
    # All we know is that the Rational instance will be at most 1/10^20 from
    # the exact root. So if we evaluate to 17 digits, it must be exactly equal
    # to:
    roots = [str(r.evalf(17)) for r in roots]
    assert roots == [
        '-0.86113631159405258',
        '-0.33998104358485626',
        '0.33998104358485626',
        '0.86113631159405258',
    ]
    # NOTE(review): presumably fails because index 1 is a non-real root
    # (and/or because the tolerance is a float) — confirm against RootOf docs
    pytest.raises(NotImplementedError,
                  lambda: RootOf(x**3 + x + 3, 1).eval_rational(1e-3))
def test_RootSum___new__():
    """Construction, normalization and auto-evaluation of RootSum."""
    f = x**3 + x + 3
    g = Lambda(r, log(r*x))
    s = RootSum(f, g)
    assert isinstance(s, RootSum) is True
    # repeated factors scale the sum; rational roots are split off
    assert RootSum(f**2, g) == 2*RootSum(f, g)
    assert RootSum((x - 7)*f**3, g) == log(7*x) + 3*RootSum(f, g)
    # issue sympy/sympy#5571
    assert hash(RootSum((x - 7)*f**3, g)) == hash(log(7*x) + 3*RootSum(f, g))
    pytest.raises(MultivariatePolynomialError, lambda: RootSum(x**3 + x + y))
    pytest.raises(ValueError, lambda: RootSum(x**2 + 3, lambda x: x))
    # a bare callable is wrapped into a Lambda
    assert RootSum(f, log) == RootSum(f, Lambda(x, log(x)))
    assert isinstance(RootSum(f, auto=False), RootSum) is True
    # symmetric-function cases evaluate to closed form automatically
    assert RootSum(f) == 0
    assert RootSum(f, Lambda(x, x)) == 0
    assert RootSum(f, Lambda(x, x**2)) == -2
    assert RootSum(f, Lambda(x, 1)) == 3
    assert RootSum(f, Lambda(x, 2)) == 6
    assert RootSum(f, auto=False).is_commutative is True
    assert RootSum(f, Lambda(x, 1/(x + x**2))) == Rational(11, 3)
    assert RootSum(f, Lambda(x, y/(x + x**2))) == Rational(11, 3)*y
    assert RootSum(x**2 - 1, Lambda(x, 3*x**2), x) == 6
    assert RootSum(x**2 - y, Lambda(x, 3*x**2), x) == 6*y
    assert RootSum(x**2 - 1, Lambda(x, z*x**2), x) == 2*z
    assert RootSum(x**2 - y, Lambda(x, z*x**2), x) == 2*z*y
    assert RootSum(
        x**2 - 1, Lambda(x, exp(x)), quadratic=True) == exp(-1) + exp(1)
    # coefficient normalization rescales the generator inside the mapped function
    assert RootSum(x**3 + a*x + a**3, tan, x) == \
        RootSum(x**3 + x + 1, Lambda(x, tan(a*x)))
    assert RootSum(a**3*x**3 + a*x + 1, tan, x) == \
        RootSum(x**3 + x + 1, Lambda(x, tan(x/a)))
    assert isinstance(RootSum(x**7 + 2*x + 1,
                              Lambda(x, log(x))).doit(),
                      RootSum)
def test_RootSum_free_symbols():
    """Free symbols come from the mapped function and poly parameters."""
    cubic = x**3 + x + 3
    assert RootSum(cubic, Lambda(r, exp(r))).free_symbols == set()
    assert RootSum(cubic, Lambda(r, exp(a*r))).free_symbols == {a}
    parametric = RootSum(x**3 + x + y, Lambda(r, exp(a*r)), x)
    assert parametric.free_symbols == {a, y}
def test_RootSum___eq__():
    """RootSum equality is generator-insensitive but poly-sensitive."""
    mapped = Lambda(x, exp(x))
    base = RootSum(x**3 + x + 1, mapped)
    assert (base == RootSum(x**3 + x + 1, mapped)) is True
    assert (base == RootSum(y**3 + y + 1, mapped)) is True
    assert (base == RootSum(x**3 + x + 2, mapped)) is False
    assert (base == RootSum(y**3 + y + 2, mapped)) is False
def test_RootSum_doit():
    """doit() expands the sum over the explicit roots."""
    gaussian = RootSum(x**2 + 1, Lambda(x, exp(x)))
    assert isinstance(gaussian, RootSum) is True
    assert gaussian.doit() == exp(-I) + exp(I)
    parametric = RootSum(x**2 + a, Lambda(x, exp(x)), x)
    assert isinstance(parametric, RootSum) is True
    assert parametric.doit() == exp(-sqrt(-a)) + exp(sqrt(-a))
def test_RootSum_evalf():
    """evalf works for rational polys; with a parameter it returns self."""
    rs = RootSum(x**2 + 1, Lambda(x, exp(x)))
    assert rs.evalf(20, chop=True).epsilon_eq(
        Float('1.0806046117362794348', 20), Float('1e-20')) is true
    assert rs.evalf(15, chop=True).epsilon_eq(
        Float('1.08060461173628', 15), Float('1e-15')) is true
    # a free parameter blocks numeric evaluation
    rs = RootSum(x**2 + a, Lambda(x, exp(x)), x)
    assert rs.evalf() == rs
def test_RootSum_diff():
    """d/dx distributes over the sum of the mapped function."""
    poly = x**3 + x + 3
    summed = RootSum(poly, Lambda(r, exp(r*x)))
    derivative = RootSum(poly, Lambda(r, r*exp(r*x)))
    assert summed.diff(x) == derivative
def test_RootSum_subs():
    """Substituting the outer variable rewrites both poly and mapped function."""
    summed = RootSum(x**3 + x + 3, Lambda(r, exp(r*x)))
    # y does not occur, so the substitution is a no-op
    assert summed.subs({y: 1}) == summed
    assert summed.subs({x: y}) == RootSum(y**3 + y + 3, Lambda(r, exp(r*y)))
def test_RootSum_rational():
    """RootSum over a rational function collapses to closed form."""
    assert RootSum(
        z**5 - z + 1, Lambda(z, z/(x - z))) == (4*x - 5)/(x**5 - x + 1)
    # differentiating through a logarithmic summand still yields closed form
    f = 161*z**3 + 115*z**2 + 19*z + 1
    g = Lambda(z, z*log(
        -3381*z**4/4 - 3381*z**3/4 - 625*z**2/2 - 125*z/2 - 5 + exp(x)))
    assert RootSum(f, g).diff(x) == -(
        (5*exp(2*x) - 6*exp(x) + 4)*exp(x)/(exp(3*x) - exp(2*x) + 1))/7
def test_RootSum_independent():
    """A sum over a factored poly splits into per-factor RootSum terms."""
    factored = (x**3 - a)**2*(x**4 - b)**3
    shifted_tan = Lambda(x, 5*tan(x) + 7)
    plain_tan = Lambda(x, tan(x))
    cubic_sum = RootSum(x**3 - a, plain_tan, x)
    quartic_sum = RootSum(x**4 - b, plain_tan, x)
    assert RootSum(factored, shifted_tan, x).as_ordered_terms() == \
        [10*cubic_sum, 15*quartic_sum, 126]
def test_sympyissue_7876():
    """all_roots() agrees, as a set, with indexed RootOf construction."""
    p = x**6 - x + 1
    indexed = frozenset(RootOf(p, i) for i in range(6))
    assert frozenset(Poly(p, x).all_roots()) == indexed
def test_sympyissue_8316():
    """Degree-8 binomials report all 8 complex roots."""
    for constant in (9, 10):
        assert len(Poly(7*x**8 - constant).all_roots()) == 8
def test_rewrite():
    """rewrite(Pow) gives a radical form when available, else returns self."""
    r3 = RootOf(x**3 + x - 1, 0)
    assert r3.evalf() == r3.rewrite(Pow).evalf()
    assert r3.rewrite(Pow) == (-1/(3*root(Rational(1, 2) + sqrt(93)/18, 3)) +
                               root(Rational(1, 2) + sqrt(93)/18, 3))
    r4 = RootOf(x**4 - x + 5, 0)
    assert r4.evalf() == r4.rewrite(Pow).evalf()
    # no radical form is produced here; rewrite is a no-op
    r11 = RootOf(x**11 + x - 3, 0)
    assert r11.rewrite(Pow) == r11
def test_RootOf_expand_func1():
    """expand_func is a no-op on rational polys, rationalizes algebraic ones."""
    rational_case = RootOf(x**3 + x + 1, 0)
    assert expand_func(rational_case) == rational_case
    algebraic_case = RootOf(x**3 - sqrt(2)*x + I, 1)
    assert expand_func(algebraic_case) == RootOf(x**12 - 4*x**8 + 2*x**6 +
                                                 4*x**4 + 4*x**2 + 1, 7)
@pytest.mark.slow
def test_RootOf_expand_func2():
    """expand_func rewrites algebraic-coefficient roots over the rationals."""
    r0 = RootOf(x**3 + I*x + 2, 0)
    assert expand_func(r0) == RootOf(x**6 + 4*x**3 + x**2 + 4, 1)
    r1 = RootOf(x**3 + I*x + 2, 1)
    assert expand_func(r1) == RootOf(x**6 + 4*x**3 + x**2 + 4, 3)
    r2 = RootOf(x**4 + sqrt(2)*x**3 - I*x + 1, 0)
    assert expand_func(r2) == RootOf(x**16 - 4*x**14 + 8*x**12 - 6*x**10 +
                                     10*x**8 + 5*x**4 + 2*x**2 + 1, 1)
    r3 = RootOf(x**3 - I*sqrt(2)*x + 5, 1)
    assert expand_func(r3) == RootOf(x**6 + 10*x**3 + 2*x**2 + 25, 2)
@pytest.mark.slow
def test_RootOf_algebraic():
    """RootOf over algebraic (non-rational) coefficient domains."""
    e = RootOf(sqrt(2)*x**4 + sqrt(2)*x**3 - I*x + sqrt(2), x, 0)
    assert e.interval.as_tuple() == ((Rational(-201, 100), 0),
                                     (Rational(-201, 200), Rational(201, 200)))
    assert e.evalf(7) == Float('-1.22731258', dps=7) + I*Float('0.6094138324', dps=7)
    # coefficients may themselves be RootOf instances
    t = RootOf(x**5 + 4*x + 2, 0)
    e = RootOf(x**4 + t*x + 1, 0)
    assert e.interval.as_tuple() == ((Rational(-201, 200), Rational(-201, 200)),
                                     (Rational(-201, 400), Rational(-201, 400)))
    assert e.evalf(7) == Float('-0.7123350278', dps=7) - I*Float('0.8248345032', dps=7)
@pytest.mark.timeout(10)
def test_diofantissue_730():
    """Classification and conjugation of a complex root of x**3 + 10*x**2 + 1."""
    complex_root = RootOf(x**3 + 10*x**2 + 1, 2)
    assert complex_root.is_real is False
    assert complex_root.is_imaginary is False
    expected = Float('0.00498962', dps=3) + I*Float('0.31604', dps=3)
    assert complex_root.evalf(3) == expected
    # conjugation is an involution
    assert complex_root.conjugate().conjugate() == complex_root
@pytest.mark.timeout(150)
def test_diofantissue_723():
    """Repeated construction of algebraic-coefficient roots stays within timeout."""
    poly = x**5 + sqrt(3)*x - 2
    for _ in range(20):
        RootOf(poly, 1)
        RootOf(poly, 2)
def test_sympyissue_15413():
    """all_roots handles a zero root alongside radical-coefficient roots."""
    roots = Poly(sqrt(2)*x**3 + x, x).all_roots()
    assert roots == [0, -I*root(2, -4), I*root(2, -4)]
| |
# coding: utf-8
import pytest
from modernrpc.exceptions import (
RPC_INTERNAL_ERROR,
RPC_METHOD_NOT_FOUND,
RPC_INVALID_REQUEST,
)
def test_xmlrpc_multicall(xmlrpc_client):
    """A multicall returns one result per sub-call, in order."""
    calls = [
        ("add", [5, 10]),
        ("divide", [30, 5]),
        ("add", [8, 8]),
        ("divide", [6, 2]),
    ]
    result = xmlrpc_client.multicall(calls)
    assert isinstance(result, xmlrpc_client.multicall_result_klass)
    assert list(result) == [15, 6, 16, 3]
def test_xmlrpc_multicall_with_unknown_method(xmlrpc_client):
    """An unknown method inside a multicall poisons only its own slot."""
    result = xmlrpc_client.multicall(
        [("add", [5, 10]), ("unknown_method", []), ("add", [8, 8])]
    )
    assert isinstance(result, xmlrpc_client.multicall_result_klass)
    # surrounding calls still succeed
    assert result[0] == 15
    assert result[2] == 16
    # accessing the failed slot raises the RPC fault
    with pytest.raises(
        xmlrpc_client.error_response_exception,
        match=r'Method not found: "unknown_method"',
    ) as exc_info:
        assert result[1]
    xmlrpc_client.assert_exception_code(exc_info.value, RPC_METHOD_NOT_FOUND)
def test_xmlrpc_multicall_with_zero_division_error(xmlrpc_client):
    """A server-side exception inside a multicall poisons only its own slot."""
    result = xmlrpc_client.multicall(
        [("add", [5, 10]), ("divide", [30, 0]), ("add", [8, 8])]
    )
    assert isinstance(result, xmlrpc_client.multicall_result_klass)
    # surrounding calls still succeed
    assert result[0] == 15
    assert result[2] == 16
    # accessing the failed slot raises the internal-error fault
    with pytest.raises(
        xmlrpc_client.error_response_exception,
        match=r"division by zero",
    ) as exc_info:
        assert result[1]
    xmlrpc_client.assert_exception_code(exc_info.value, RPC_INTERNAL_ERROR)
def test_jsonrpc_multicall_error(jsonrpc_client):
    """system.multicall is not exposed over JSON-RPC."""
    with pytest.raises(
        jsonrpc_client.error_response_exception,
        match=r'Method not found: "system.multicall"',
    ) as exc_info:
        jsonrpc_client.call("system.multicall")
    jsonrpc_client.assert_exception_code(exc_info.value, RPC_METHOD_NOT_FOUND)
def test_jsonrpc_batch(jsonrpc_client):
    """A batch request yields one JSON-RPC response object per sub-request."""
    result = jsonrpc_client.batch_request(
        [("add", [5, 10]), ("divide", [77, 11]), ("add", [8, 8])]
    )
    assert isinstance(result, jsonrpc_client.batch_result_klass)
    assert result == [
        {"id": i, "jsonrpc": "2.0", "result": value}
        for i, value in enumerate([15, 7, 16])
    ]
def test_jsonrpc_batch_with_unknown_method(jsonrpc_client):
    """An unknown method inside a batch yields an error object in its slot only."""
    result = jsonrpc_client.batch_request(
        [("add", [5, 10]), ("unknown_method", []), ("add", [8, 8])]
    )
    assert isinstance(result, jsonrpc_client.batch_result_klass)
    error_entry = {
        "id": 1,
        "jsonrpc": "2.0",
        "error": {
            "code": RPC_METHOD_NOT_FOUND,
            "message": 'Method not found: "unknown_method"',
        },
    }
    assert result == [
        {"id": 0, "jsonrpc": "2.0", "result": 15},
        error_entry,
        {"id": 2, "jsonrpc": "2.0", "result": 16},
    ]
def test_jsonrpc_batch_with_zero_division_error(jsonrpc_client):
    """A server-side exception inside a batch yields an error object in its slot."""
    result = jsonrpc_client.batch_request(
        [("add", [5, 10]), ("divide", [9, 0]), ("add", [8, 8])]
    )
    assert isinstance(result, jsonrpc_client.batch_result_klass)
    error_entry = {
        "id": 1,
        "jsonrpc": "2.0",
        "error": {
            "code": RPC_INTERNAL_ERROR,
            "message": "Internal error: division by zero",
        },
    }
    assert result == [
        {"id": 0, "jsonrpc": "2.0", "result": 15},
        error_entry,
        {"id": 2, "jsonrpc": "2.0", "result": 16},
    ]
def test_jsonrpc_batch_with_named_params(jsonrpc_client):
    """Batch sub-requests may pass positional or named parameters."""
    result = jsonrpc_client.batch_request(
        [
            ("add", {"a": 5, "b": 10}),
            ("divide", {"numerator": 30, "denominator": 5}),
            ("method_with_kwargs", []),
            ("method_with_kwargs_2", [6]),
            ("method_with_kwargs_2", {"x": 25}),
        ]
    )
    assert isinstance(result, jsonrpc_client.batch_result_klass)
    expected_values = [15, 6, "__json_rpc", [6, "__json_rpc"], [25, "__json_rpc"]]
    assert result == [
        {"id": i, "jsonrpc": "2.0", "result": value}
        for i, value in enumerate(expected_values)
    ]
def test_jsonrpc_batch_with_notify(jsonrpc_client):
    """Notification-only sub-requests produce no entry in the batch response."""
    calls = [
        ("add", {"a": 5, "b": 10}),
        ("method_with_kwargs", [], "notify_only"),
        ("divide", {"numerator": 30, "denominator": 5}),
    ]
    result = jsonrpc_client.batch_request(calls)
    assert isinstance(result, jsonrpc_client.batch_result_klass)
    assert len(result) == 2
    assert result == [
        {"jsonrpc": "2.0", "id": 0, "result": 15},
        {"jsonrpc": "2.0", "id": 1, "result": 6},
    ]
def test_jsonrpc_batch_with_only_notify(jsonrpc_client):
    """A batch made solely of notifications gets no response at all."""
    calls = [
        ("add", {"a": 5, "b": 10}, "notify_only"),
        ("method_with_kwargs", [], "notify_only"),
        ("divide", {"numerator": 30, "denominator": 5}, "notify_only"),
    ]
    assert jsonrpc_client.batch_request(calls) is None
def test_jsonrpc_batch_invalid_request(live_server, endpoint_path):
    """Each non-object entry in a batch gets its own Invalid Request error."""
    import requests
    response = requests.post(
        live_server.url + endpoint_path,
        data="[1, 2, 3]",
        headers={"content-type": "application/json"},
    )
    result = response.json()
    assert isinstance(result, list)
    assert len(result) == 3
    invalid_entry = {
        "id": None,
        "jsonrpc": "2.0",
        "error": {
            "code": RPC_INVALID_REQUEST,
            "message": 'Invalid JSON-RPC payload, expected "object", found "int"',
        },
    }
    assert result == [invalid_entry] * 3
| |
__author__ = 'mdavid'
from mock import *
from unittest import TestCase
from wnsresolver import *
class TestInit(TestCase):
    """Constructor behaviour of WalletNameResolver."""
    def test_all_args(self):
        """Every constructor argument lands on the matching attribute."""
        wns_resolver = WalletNameResolver(
            resolv_conf='resolv.conf',
            dnssec_root_key='root_key',
            nc_host='127.0.0.1',
            nc_port=1234,
            nc_rpcuser='rpcuser',
            nc_rpcpassword='rpcpassword',
            nc_tmpdir='/tmp'
        )
        expected = [
            ('resolv_conf', 'resolv.conf'),
            ('dnssec_root_key', 'root_key'),
            ('nc_host', '127.0.0.1'),
            ('nc_port', 1234),
            ('nc_user', 'rpcuser'),
            ('nc_password', 'rpcpassword'),
            ('nc_tmpdir', '/tmp'),
        ]
        for attr, value in expected:
            self.assertEqual(value, getattr(wns_resolver, attr))
    def test_defaults(self):
        """Without arguments, sensible defaults apply and namecoin is unset."""
        wns_resolver = WalletNameResolver()
        self.assertEqual('/etc/resolv.conf', wns_resolver.resolv_conf)
        self.assertEqual('/usr/local/etc/unbound/root.key',
                         wns_resolver.dnssec_root_key)
        self.assertIsNone(wns_resolver.nc_host)
        self.assertEqual(8336, wns_resolver.nc_port)
        self.assertIsNone(wns_resolver.nc_user)
        self.assertIsNone(wns_resolver.nc_password)
        self.assertIsNone(wns_resolver.nc_tmpdir)
class TestNamecoinOptions(TestCase):
    """set_namecoin_options() reconfigures the namecoin connection settings."""
    def test_with_args(self):
        """Arguments map onto the nc_* attributes; resolver settings untouched."""
        wns_resolver = WalletNameResolver()
        wns_resolver.set_namecoin_options(
            host='127.0.0.1',
            port=1234,
            user='rpcuser',
            password='rpcpassword',
            tmpdir='/tmp'
        )
        self.assertEqual('/etc/resolv.conf', wns_resolver.resolv_conf)
        self.assertEqual('/usr/local/etc/unbound/root.key',
                         wns_resolver.dnssec_root_key)
        for attr, value in (('nc_host', '127.0.0.1'),
                            ('nc_port', 1234),
                            ('nc_user', 'rpcuser'),
                            ('nc_password', 'rpcpassword'),
                            ('nc_tmpdir', '/tmp')):
            self.assertEqual(value, getattr(wns_resolver, attr))
    def test_defaults(self):
        """Calling with no arguments resets everything, even a host set earlier."""
        wns_resolver = WalletNameResolver(nc_host='127.0.0.1')
        wns_resolver.set_namecoin_options()
        self.assertEqual('/etc/resolv.conf', wns_resolver.resolv_conf)
        self.assertEqual('/usr/local/etc/unbound/root.key',
                         wns_resolver.dnssec_root_key)
        self.assertIsNone(wns_resolver.nc_host)
        self.assertEqual(8336, wns_resolver.nc_port)
        self.assertIsNone(wns_resolver.nc_user)
        self.assertIsNone(wns_resolver.nc_password)
        self.assertIsNone(wns_resolver.nc_tmpdir)
class TestResolveAvailableCurrencies(TestCase):
    """Tests for WalletNameResolver.resolve_available_currencies()."""
    def setUp(self):
        # patch both the DNS-based resolver and the Namecoin fallback,
        # each answering with a space-separated currency list
        self.patcher1 = patch('wnsresolver.WalletNameResolver.resolve')
        self.patcher2 = patch('bcresolver.NamecoinResolver')
        self.mockWnsResolver = self.patcher1.start()
        self.mockNamecoinResolver = self.patcher2.start()
        self.mockWnsResolver.side_effect = [
            'btc ltc dgc'
        ]
        self.mockNamecoinResolver.return_value.resolve.side_effect = [
            'btc ltc dgc',
        ]
    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()
    def test_go_right(self):
        """Plain domain lookup splits the TXT answer into a currency list."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_available_currencies('wallet.mattdavid.xyz')
        self.assertEqual(['btc','ltc','dgc'], ret_val)
        self.assertEqual(1, self.mockWnsResolver.call_count)
    def test_go_right_email_format(self):
        """Email-style names are rewritten to a _wallet.<digest>.domain label
        (digest presumably derived from the local part — confirm in resolver)."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_available_currencies('wallet@mattdavid.xyz')
        self.assertEqual('_wallet.9e9285c79443cf2c0f868b0216308fb0e3ffeb45ade2c10ac67147f5.mattdavid.xyz', self.mockWnsResolver.call_args[0][0])
        self.assertEqual(['btc','ltc','dgc'], ret_val)
        self.assertEqual(1, self.mockWnsResolver.call_count)
    def test_no_name(self):
        """A None wallet name is rejected before any DNS lookup happens."""
        wns_resolver = WalletNameResolver()
        self.assertRaises(AttributeError, wns_resolver.resolve_available_currencies, None)
        self.assertEqual(0, self.mockWnsResolver.call_count)
    def test_namecoin_go_right(self):
        """.bit domains are routed to NamecoinResolver with the nc_* settings."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_available_currencies('wallet.mattdavid.bit')
        self.assertEqual(['btc','ltc','dgc'], ret_val)
        self.assertEqual(1, self.mockNamecoinResolver.call_count)
        self.assertEqual(1, self.mockNamecoinResolver.return_value.resolve.call_count)
        # the resolver's namecoin settings are forwarded as keyword arguments
        self.assertEqual(wns_resolver.nc_host, self.mockNamecoinResolver.call_args[1]['host'])
        self.assertEqual(wns_resolver.nc_user, self.mockNamecoinResolver.call_args[1]['user'])
        self.assertEqual(wns_resolver.nc_password, self.mockNamecoinResolver.call_args[1]['password'])
        self.assertEqual(wns_resolver.nc_port, self.mockNamecoinResolver.call_args[1]['port'])
        self.assertEqual(wns_resolver.nc_tmpdir, self.mockNamecoinResolver.call_args[1]['temp_dir'])
    def test_namecoin_import_error(self):
        """Missing namecoin support surfaces as WalletNameNamecoinUnavailable."""
        self.mockNamecoinResolver.side_effect = ImportError()
        wns_resolver = WalletNameResolver()
        self.assertRaises(WalletNameNamecoinUnavailable, wns_resolver.resolve_available_currencies, 'wallet.mattdavid.bit')
        self.assertEqual(1, self.mockNamecoinResolver.call_count)
class TestResolveWalletName(TestCase):
    """Tests for WalletNameResolver.resolve_wallet_name()."""
    def setUp(self):
        # the resolver answers twice: first the currency list, then the address
        self.patcher1 = patch('wnsresolver.WalletNameResolver.resolve')
        self.patcher2 = patch('bcresolver.NamecoinResolver')
        self.mockWnsResolver = self.patcher1.start()
        self.mockNamecoinResolver = self.patcher2.start()
        self.mockWnsResolver.side_effect = [
            'btc',
            '23456789MgDBffBffBff'
        ]
        self.mockNamecoinResolver.return_value.resolve.side_effect = [
            'btc',
            '23456789MgDBffBffBff'
        ]
    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()
    def test_go_right(self):
        """Currency lookup then address lookup: two resolve calls total."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_wallet_name('wallet.mattdavid.xyz', 'btc')
        self.assertEqual('23456789MgDBffBffBff', ret_val)
        self.assertEqual(2, self.mockWnsResolver.call_count)
    def test_go_right_email_format(self):
        """Email-style names query the _wallet.<digest> label, then the
        currency-specific _btc. sub-label."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_wallet_name('wallet@mattdavid.xyz', 'btc')
        self.assertEqual('_wallet.9e9285c79443cf2c0f868b0216308fb0e3ffeb45ade2c10ac67147f5.mattdavid.xyz', self.mockWnsResolver.call_args_list[0][0][0])
        self.assertEqual('_btc._wallet.9e9285c79443cf2c0f868b0216308fb0e3ffeb45ade2c10ac67147f5.mattdavid.xyz', self.mockWnsResolver.call_args_list[1][0][0])
        self.assertEqual('23456789MgDBffBffBff', ret_val)
        self.assertEqual(2, self.mockWnsResolver.call_count)
    def test_no_name(self):
        """A None wallet name is rejected before any lookup."""
        wns_resolver = WalletNameResolver()
        self.assertRaises(AttributeError, wns_resolver.resolve_wallet_name, None, 'btc')
        self.assertEqual(0, self.mockWnsResolver.call_count)
    def test_no_currency(self):
        """A None currency is rejected before any lookup."""
        wns_resolver = WalletNameResolver()
        self.assertRaises(AttributeError, wns_resolver.resolve_wallet_name, 'wallet.mattdavid.xyz', None)
        self.assertEqual(0, self.mockWnsResolver.call_count)
    def test_no_currency_list(self):
        """A currency absent from the advertised list aborts after one lookup."""
        wns_resolver = WalletNameResolver()
        self.assertRaises(WalletNameCurrencyUnavailableError, wns_resolver.resolve_wallet_name, 'wallet.mattdavid.xyz', 'dgc')
        self.assertEqual(1, self.mockWnsResolver.call_count)
    def test_no_available_currency(self):
        """An empty currency answer raises WalletNameUnavailableError."""
        self.mockWnsResolver.side_effect = None
        self.mockWnsResolver.return_value = None
        wns_resolver = WalletNameResolver()
        self.assertRaises(WalletNameUnavailableError, wns_resolver.resolve_wallet_name, 'wallet.mattdavid.xyz', 'btc')
        self.assertEqual(1, self.mockWnsResolver.call_count)
    def test_namecoin_go_right(self):
        """.bit domains use NamecoinResolver for both lookups."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve_wallet_name('wallet.mattdavid.bit', 'btc')
        self.assertEqual('23456789MgDBffBffBff', ret_val)
        self.assertEqual(1, self.mockNamecoinResolver.call_count)
        self.assertEqual(2, self.mockNamecoinResolver.return_value.resolve.call_count)
        # the resolver's namecoin settings are forwarded as keyword arguments
        self.assertEqual(wns_resolver.nc_host, self.mockNamecoinResolver.call_args[1]['host'])
        self.assertEqual(wns_resolver.nc_user, self.mockNamecoinResolver.call_args[1]['user'])
        self.assertEqual(wns_resolver.nc_password, self.mockNamecoinResolver.call_args[1]['password'])
        self.assertEqual(wns_resolver.nc_port, self.mockNamecoinResolver.call_args[1]['port'])
        self.assertEqual(wns_resolver.nc_tmpdir, self.mockNamecoinResolver.call_args[1]['temp_dir'])
    def test_namecoin_import_error(self):
        """Missing namecoin support surfaces as WalletNameNamecoinUnavailable."""
        self.mockNamecoinResolver.side_effect = ImportError()
        wns_resolver = WalletNameResolver()
        self.assertRaises(WalletNameNamecoinUnavailable, wns_resolver.resolve_wallet_name, 'wallet.mattdavid.bit', 'btc')
        self.assertEqual(1, self.mockNamecoinResolver.call_count)
class TestResolve(TestCase):
    def setUp(self):
        """Patch DNS (ub_ctx), HTTP (requests), os and the framework request
        object, defaulting to a secure, validated TXT answer."""
        self.patcher1 = patch('wnsresolver.ub_ctx')
        self.patcher2 = patch('wnsresolver.requests')
        self.patcher3 = patch('wnsresolver.os')
        self.patcher4 = patch('wnsresolver.request')
        self.patcher5 = patch('wnsresolver.WalletNameResolver.get_endpoint_host')
        self.mockUnbound = self.patcher1.start()
        self.mockRequests = self.patcher2.start()
        self.mockOS = self.patcher3.start()
        self.mockRequest = self.patcher4.start()
        self.mockGetEndpointHost = self.patcher5.start()
        # a successful, DNSSEC-validated answer by default
        self.mockResult = Mock()
        self.mockResult.secure = True
        self.mockResult.bogus = False
        self.mockResult.havedata = True
        # base64 of a bitcoin: payment-request URI (decoded in the tests below)
        self.mockResult.data.as_domain_list.return_value = ['Yml0Y29pbjo/cj1odHRwczovL21lcmNoYW50LmNvbS9wYXkucGhwP2glM0QyYTg2MjhmYzJmYmU=']
        self.mockUnbound.return_value.resolve.return_value = (0, self.mockResult)
        self.mockRequests.get.return_value.text = 'test response text'
        # client address that ends up in the X-Forwarded-For header
        self.mockRequest.access_route = ['8.8.8.8']
def tearDown(self):
self.patcher1.stop()
self.patcher2.stop()
self.patcher3.stop()
self.patcher4.stop()
self.patcher5.stop()
    def test_go_right_startswith_bitcoin_uri(self):
        """A TXT record that decodes to a bitcoin: URI is returned directly."""
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
        # Validate response
        self.assertEqual('bitcoin:?r=https://merchant.com/pay.php?h%3D2a8628fc2fbe', ret_val)
        # Validate all calls -- no endpoint lookup or HTTP fetch is needed
        self.assertEqual(1, self.mockUnbound.call_count)
        self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
        self.assertEqual(0, self.mockGetEndpointHost.call_count)
        self.assertEqual(0, self.mockRequests.get.call_count)
    def test_go_right_startswith_http_get_endpoint_returns_lookup_url(self):
        """A record decoding to an https URL triggers a GET on the lookup URL."""
        # Setup Test case -- the record is base64 of an https:// URL
        self.mockGetEndpointHost.return_value = 'lookup_url_returned', None
        self.mockResult.data.as_domain_list.return_value = ['aHR0cHM6Ly9iaXAzMmFkZHJlc3MuY29tL2dldG1pbmU=']
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
        # Validate response -- the HTTP body is passed through
        self.assertEqual('test response text', ret_val)
        # Validate GET contains b64txt and headers
        self.assertEqual('lookup_url_returned', self.mockRequests.get.call_args[0][0])
        self.assertEqual({'X-Forwarded-For': '8.8.8.8'}, self.mockRequests.get.call_args[1].get('headers'))
        # Validate all calls
        self.assertEqual(1, self.mockUnbound.call_count)
        self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
        self.assertEqual(1, self.mockGetEndpointHost.call_count)
        self.assertEqual(1, self.mockRequests.get.call_count)
    def test_go_right_startswith_http_get_endpoint_returns_return_data(self):
        """If get_endpoint_host supplies data directly, no HTTP request is made."""
        # Setup test case
        self.mockGetEndpointHost.return_value = None, 'myretdata'
        self.mockResult.data.as_domain_list.return_value = ['aHR0cHM6Ly9iaXAzMmFkZHJlc3MuY29tL2dldG1pbmU=']
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
        # Validate response
        self.assertEqual('myretdata', ret_val)
        # Validate all calls -- the HTTP fetch is skipped entirely
        self.assertEqual(1, self.mockUnbound.call_count)
        self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
        self.assertEqual(1, self.mockGetEndpointHost.call_count)
        self.assertEqual(0, self.mockRequests.get.call_count)
    def test_go_right_b64decode_exception(self):
        """A record that is not valid base64 is returned unchanged."""
        # Setup Test case -- a raw address, not base64
        self.mockResult.data.as_domain_list.return_value = ['1MSK1PMnDZN4SLDQ6gB4c6GKRExfGD6Gb3']
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
        # Validate response
        self.assertEqual('1MSK1PMnDZN4SLDQ6gB4c6GKRExfGD6Gb3', ret_val)
        # Validate all calls
        self.assertEqual(1, self.mockUnbound.call_count)
        self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
        self.assertEqual(0, self.mockGetEndpointHost.call_count)
        self.assertEqual(0, self.mockRequests.get.call_count)
    def test_go_right_end_of_chain(self):
        """A decodable record that is neither a URI nor a URL is returned
        in its original (still-encoded) form."""
        # Setup Test case
        self.mockResult.data.as_domain_list.return_value = ['dGhpc2lzZ3JlYXQx']
        wns_resolver = WalletNameResolver()
        ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
        # Validate response
        self.assertEqual('dGhpc2lzZ3JlYXQx', ret_val)
        # Validate all calls
        self.assertEqual(1, self.mockUnbound.call_count)
        self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
        self.assertEqual(0, self.mockGetEndpointHost.call_count)
        self.assertEqual(0, self.mockRequests.get.call_count)
def test_trust_anchor_missing(self):
    """resolve() fails fast when the DNSSEC trust anchor file is absent."""
    # Setup Test case
    self.mockOS.path.isfile.return_value = False
    wns_resolver = WalletNameResolver()
    self.assertRaisesRegexp(
        Exception,
        'Trust anchor is missing or inaccessible',
        wns_resolver.resolve,
        'wallet.mattdavid.xyz',
        'TXT'
    )
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    # The DNS query is never issued without a trust anchor.
    self.assertEqual(0, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(0, self.mockGetEndpointHost.call_count)
    self.assertEqual(0, self.mockRequests.get.call_count)
def test_status_not_0(self):
    """A non-zero unbound resolve status raises WalletNameLookupError."""
    # Setup Test case
    from wnsresolver import WalletNameLookupError
    self.mockUnbound.return_value.resolve.return_value = (1, self.mockResult)
    wns_resolver = WalletNameResolver()
    self.assertRaises(
        WalletNameLookupError,
        wns_resolver.resolve,
        'wallet.mattdavid.xyz',
        'TXT'
    )
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(0, self.mockGetEndpointHost.call_count)
    self.assertEqual(0, self.mockRequests.get.call_count)
def test_insecure_result(self):
    """A result without DNSSEC security raises WalletNameLookupInsecureError."""
    # Setup Test case
    from wnsresolver import WalletNameLookupInsecureError
    self.mockResult.secure = False
    wns_resolver = WalletNameResolver()
    self.assertRaises(
        WalletNameLookupInsecureError,
        wns_resolver.resolve,
        'wallet.mattdavid.xyz',
        'TXT'
    )
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(0, self.mockGetEndpointHost.call_count)
    self.assertEqual(0, self.mockRequests.get.call_count)
def test_bogus_result(self):
    """A bogus DNSSEC result raises WalletNameLookupInsecureError."""
    # Setup Test case
    from wnsresolver import WalletNameLookupInsecureError
    self.mockResult.bogus = True
    wns_resolver = WalletNameResolver()
    self.assertRaises(
        WalletNameLookupInsecureError,
        wns_resolver.resolve,
        'wallet.mattdavid.xyz',
        'TXT'
    )
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(0, self.mockGetEndpointHost.call_count)
    self.assertEqual(0, self.mockRequests.get.call_count)
def test_havedata_false(self):
    """A secure result carrying no record data resolves to None."""
    # Setup Test case
    self.mockResult.havedata = False
    wns_resolver = WalletNameResolver()
    ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
    # Validate response
    self.assertIsNone(ret_val)
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(0, self.mockGetEndpointHost.call_count)
    self.assertEqual(0, self.mockRequests.get.call_count)
def test_exception_during_lookup_url_get(self):
    """If fetching the endpoint URL raises, the decoded URL itself is the
    result."""
    # Setup Test case
    self.mockGetEndpointHost.return_value = 'urls', None
    self.mockResult.data.as_domain_list.return_value = ['aHR0cHM6Ly9iaXA3MHBheW1lbnRyZXF1ZXN0LmNvbS9nZXRtaW5l']
    self.mockRequests.get.side_effect = Exception()
    wns_resolver = WalletNameResolver()
    ret_val = wns_resolver.resolve('wallet.mattdavid.xyz', 'TXT')
    # Validate response
    # The TXT value base64-decodes to this URL, returned unfetched because
    # the GET failed.
    self.assertEqual('https://bip70paymentrequest.com/getmine', ret_val)
    # Validate all calls
    self.assertEqual(1, self.mockUnbound.call_count)
    self.assertEqual(1, self.mockUnbound.return_value.resolve.call_count)
    self.assertEqual(1, self.mockGetEndpointHost.call_count)
    self.assertEqual(1, self.mockRequests.get.call_count)
class TestGetEndpointHost(TestCase):
    """Tests for WalletNameResolver.get_endpoint_host()."""

    def setUp(self):
        # Stub the socket module so no real DNS resolution takes place.
        self.patcher1 = patch('wnsresolver.socket')
        self.mockSocket = self.patcher1.start()

    def tearDown(self):
        self.patcher1.stop()

    def test_go_right_valid_hostname(self):
        """A resolvable public hostname comes back as a usable URL."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('http://www.example.com/pr/uuid')
        self.assertEqual('http://www.example.com/pr/uuid', url)
        self.assertIsNone(data)

    def test_go_right_valid_ipv4(self):
        """A public IPv4 literal comes back as a usable URL."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://8.8.8.8/pr/uuid')
        self.assertEqual('https://8.8.8.8/pr/uuid', url)
        self.assertIsNone(data)

    def test_go_right_valid_ipv6(self):
        """A public IPv6 literal comes back as a usable URL."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://[2001:4860:4860::8888]/pr/uuid')
        self.assertEqual('https://[2001:4860:4860::8888]/pr/uuid', url)
        self.assertIsNone(data)

    def test_hostname_localhost(self):
        """localhost is not accepted as an endpoint; raw value is data."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://localhost/pr/uuid')
        self.assertIsNone(url)
        self.assertEqual('https://localhost/pr/uuid', data)

    def test_hostname_is_none(self):
        """A URL with no host part yields no endpoint URL."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://')
        self.assertIsNone(url)
        self.assertEqual('https://', data)

    def test_gethostbyname_returns_socket_gaierror(self):
        """An unresolvable hostname falls back to returning the raw value."""
        import socket
        self.mockSocket.gaierror = socket.gaierror
        self.mockSocket.gethostbyname.side_effect = self.mockSocket.gaierror
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://nonexistent_hostname/pr/uuid')
        self.assertIsNone(url)
        self.assertEqual('https://nonexistent_hostname/pr/uuid', data)

    def test_no_route_ip(self):
        """A private, non-routable IP is not accepted as an endpoint."""
        resolver = WalletNameResolver()
        url, data = resolver.get_endpoint_host('https://192.168.100.1/pr/uuid')
        self.assertIsNone(url)
        self.assertEqual('https://192.168.100.1/pr/uuid', data)
class TestPreprocessName(TestCase):
    """Tests for WalletNameResolver.preprocess_name()."""

    def test_no_change(self):
        """A plain domain-style wallet name passes through untouched."""
        resolver = WalletNameResolver()
        processed = resolver.preprocess_name('wallet.domain.com')
        self.assertEqual('wallet.domain.com', processed)

    def test_email_walletname(self):
        """An email-style name has its local part replaced by a hex label."""
        resolver = WalletNameResolver()
        processed = resolver.preprocess_name('wallet@domain.com')
        self.assertEqual('9e9285c79443cf2c0f868b0216308fb0e3ffeb45ade2c10ac67147f5.domain.com', processed)

    def test_email_with_multiple_at_signs(self):
        """Only the first '@' splits the name; later ones stay in the domain."""
        resolver = WalletNameResolver()
        processed = resolver.preprocess_name('wallet@wallet@domain.com')
        self.assertEqual('9e9285c79443cf2c0f868b0216308fb0e3ffeb45ade2c10ac67147f5.wallet@domain.com', processed)
| |
import os
import smtplib
from flexmock import flexmock
import pytest
import six
import json
try:
import koji
except ImportError:
import inspect
import sys
# Find our mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.exit_sendmail import SendMailPlugin
from atomic_reactor.plugins.exit_koji_import import KojiImportPlugin
from atomic_reactor.plugins.exit_koji_promote import KojiPromotePlugin
from atomic_reactor import util
from smtplib import SMTPException
# Short aliases for SendMailPlugin's send_on states, used throughout the
# parametrized tests below.
MS, MF = SendMailPlugin.MANUAL_SUCCESS, SendMailPlugin.MANUAL_FAIL
AS, AF = SendMailPlugin.AUTO_SUCCESS, SendMailPlugin.AUTO_FAIL
MC, AC = SendMailPlugin.MANUAL_CANCELED, SendMailPlugin.AUTO_CANCELED

# Fixture values; MockedClientSession below asserts it is queried with
# exactly these ids.
MOCK_EMAIL_DOMAIN = "domain.com"
MOCK_KOJI_TASK_ID = 12345
MOCK_KOJI_BUILD_ID = 98765
MOCK_KOJI_PACKAGE_ID = 123
MOCK_KOJI_TAG_ID = 456
MOCK_KOJI_OWNER_ID = 789
MOCK_KOJI_OWNER_NAME = "foo"
MOCK_KOJI_OWNER_EMAIL = "foo@bar.com"
# "<user name>@<email_domain>" addresses, expected when koji has no kerberos
# principal for the user (see test_generated_email).
MOCK_KOJI_OWNER_GENERATED = "@".join([MOCK_KOJI_OWNER_NAME, MOCK_EMAIL_DOMAIN])
MOCK_KOJI_SUBMITTER_ID = 123456
MOCK_KOJI_SUBMITTER_NAME = "baz"
MOCK_KOJI_SUBMITTER_EMAIL = "baz@bar.com"
MOCK_KOJI_SUBMITTER_GENERATED = "@".join([MOCK_KOJI_SUBMITTER_NAME, MOCK_EMAIL_DOMAIN])
MOCK_ADDITIONAL_EMAIL = "spam@bar.com"
class MockedClientSession(object):
    """In-memory stand-in for ``koji.ClientSession``.

    Returns the MOCK_* fixture values and asserts that it is only ever
    queried with the expected ids.
    """

    def __init__(self, hub, opts=None, has_kerberos=True):
        # When False, getUser() returns an empty kerberos principal plus the
        # bare user name, so callers must build the address themselves.
        self.has_kerberos = has_kerberos

    def krb_login(self, principal=None, keytab=None, proxyuser=None):
        # Kerberos login always fails in these tests.
        raise RuntimeError('No certificates provided')

    def ssl_login(self, cert=None, ca=None, serverca=None, proxyuser=None):
        return True

    def getBuild(self, build_id):
        assert build_id == MOCK_KOJI_BUILD_ID
        return {'package_id': MOCK_KOJI_PACKAGE_ID}

    def listTags(self, build_id):
        assert build_id == MOCK_KOJI_BUILD_ID
        return [{"id": MOCK_KOJI_TAG_ID}]

    def getPackageConfig(self, tag_id, package_id):
        assert tag_id == MOCK_KOJI_TAG_ID
        assert package_id == MOCK_KOJI_PACKAGE_ID
        return {"owner_id": MOCK_KOJI_OWNER_ID}

    def getUser(self, user_id):
        # Only the mocked owner and submitter ids are known.
        if user_id == MOCK_KOJI_OWNER_ID:
            if self.has_kerberos:
                return {"krb_principal": MOCK_KOJI_OWNER_EMAIL}
            else:
                return {"krb_principal": "",
                        "name": MOCK_KOJI_OWNER_NAME}
        elif user_id == MOCK_KOJI_SUBMITTER_ID:
            if self.has_kerberos:
                return {"krb_principal": MOCK_KOJI_SUBMITTER_EMAIL}
            else:
                return {"krb_principal": "",
                        "name": MOCK_KOJI_SUBMITTER_NAME}
        else:
            assert False, "Don't know user with id %s" % user_id

    def getTaskInfo(self, task_id):
        assert task_id == MOCK_KOJI_TASK_ID
        return {"owner": MOCK_KOJI_SUBMITTER_ID}

    def listTaskOutput(self, task_id):
        assert task_id == MOCK_KOJI_TASK_ID
        return ["openshift-final.log", "build.log"]
class MockedPathInfo(object):
    """Minimal stand-in for ``koji.PathInfo`` used to build log URLs."""

    def __init__(self, topdir=None):
        self.topdir = topdir

    def work(self):
        # Work-directory URL under topdir.
        return "{}/work".format(self.topdir)

    def taskrelpath(self, task_id):
        # Only the mocked task id is valid.
        assert task_id == MOCK_KOJI_TASK_ID
        return "tasks/%s" % task_id
class TestSendMailPlugin(object):
    """Unit tests for atomic_reactor's exit_sendmail plugin."""

    def test_fails_with_unknown_states(self):
        """run() must reject send_on values that are not known states."""
        class WF(object):
            # Minimal workflow stub: no exit-plugin results recorded.
            exit_results = {}

        p = SendMailPlugin(None, WF(),
                           smtp_host='smtp.bar.com', from_address='foo@bar.com',
                           send_on=['unknown_state', MS])
        with pytest.raises(PluginFailedException) as e:
            p.run()
        assert str(e.value) == 'Unknown state(s) "unknown_state" for sendmail plugin'
    @pytest.mark.parametrize('rebuild, success, auto_canceled, manual_canceled, send_on, expected', [  # noqa
        # make sure that right combinations only succeed for the specific state
        (False, True, False, False, [MS], True),
        (False, True, False, True, [MS], True),
        (False, True, False, False, [MF, AS, AF, AC], False),
        (False, True, False, True, [MF, AS, AF, AC], False),
        (False, False, False, False, [MF], True),
        (False, False, False, True, [MF], True),
        (False, False, False, False, [MS, AS, AF, AC], False),
        (False, False, False, True, [MS, AS, AF, AC], False),
        (False, False, True, True, [MC], True),
        (False, True, True, True, [MC], True),
        (False, True, False, True, [MC], True),
        (False, True, False, False, [MC], False),
        (True, True, False, False, [AS], True),
        (True, True, False, False, [MS, MF, AF, AC], False),
        (True, False, False, False, [AF], True),
        (True, False, False, False, [MS, MF, AS, AC], False),
        (True, False, True, True, [AC], True),
        # auto_fail would also give us True in this case
        (True, False, True, True, [MS, MF, AS], False),
        # also make sure that a random combination of more plugins works ok
        (True, False, False, False, [AF, MS], True)
    ])
    def test_should_send(self, rebuild, success, auto_canceled, manual_canceled, send_on, expected):
        """_should_send() is True exactly when the build outcome matches one
        of the configured send_on states."""
        class WF(object):
            exit_results = {
                KojiPromotePlugin.key: MOCK_KOJI_BUILD_ID
            }

        kwargs = {
            'smtp_host': 'smtp.bar.com',
            'from_address': 'foo@bar.com',
            'send_on': send_on,
        }

        p = SendMailPlugin(None, WF(), **kwargs)
        assert p._should_send(rebuild, success, auto_canceled, manual_canceled) == expected
    @pytest.mark.parametrize(('autorebuild', 'auto_cancel', 'manual_cancel',
                              'to_koji_submitter', 'has_koji_logs'), [
        (True, False, False, True, True),
        (True, True, False, True, True),
        (True, False, True, True, True),
        (True, False, False, True, False),
        (True, True, False, True, False),
        (True, False, True, True, False),
        (False, False, False, True, True),
        (False, True, False, True, True),
        (False, False, True, True, True),
        (False, False, False, True, False),
        (False, True, False, True, False),
        (False, False, True, True, False),
        (True, False, False, False, True),
        (True, True, False, False, True),
        (True, False, True, False, True),
        (True, False, False, False, False),
        (True, True, False, False, False),
        (True, False, True, False, False),
        (False, False, False, False, True),
        (False, True, False, False, True),
        (False, False, True, False, True),
        (False, False, False, False, False),
        (False, True, False, False, False),
        (False, False, True, False, False),
    ])
    def test_render_mail(self, monkeypatch, autorebuild, auto_cancel, manual_cancel,
                         to_koji_submitter, has_koji_logs):
        """_render_mail() produces the expected subject and body, including
        the submitter line and the log URL fallback."""
        # just test a random combination of the method inputs and hope it's ok for other
        # combinations
        class TagConf(object):
            unique_images = []

        class WF(object):
            image = util.ImageName.parse('foo/bar:baz')
            openshift_build_selflink = '/builds/blablabla'
            build_process_failed = False
            autorebuild_canceled = auto_cancel
            build_canceled = manual_cancel
            tag_conf = TagConf()
            exit_results = {
                KojiPromotePlugin.key: MOCK_KOJI_BUILD_ID
            }

        monkeypatch.setenv("BUILD", json.dumps({
            'metadata': {
                'labels': {
                    'koji-task-id': MOCK_KOJI_TASK_ID,
                },
            }
        }))

        session = MockedClientSession('', has_kerberos=True)
        pathinfo = MockedPathInfo('https://koji')
        if not has_koji_logs:
            # Simulate koji being unable to provide the work dir, which
            # forces the openshift log URL fallback below.
            (flexmock(pathinfo)
                .should_receive('work')
                .and_raise(RuntimeError, "xyz"))
        flexmock(koji, ClientSession=lambda hub, opts: session, PathInfo=pathinfo)

        kwargs = {
            'url': 'https://something.com',
            'smtp_host': 'smtp.bar.com',
            'from_address': 'foo@bar.com',
            'to_koji_submitter': to_koji_submitter,
            'to_koji_pkgowner': False,
            'koji_hub': '',
            'koji_root': 'https://koji/',
            'koji_proxyuser': None,
            'koji_ssl_certs_dir': '/certs',
            'koji_krb_principal': None,
            'koji_krb_keytab': None
        }
        p = SendMailPlugin(None, WF(), **kwargs)

        # Trailing slash of koji_root is stripped by the plugin.
        assert p.koji_root == 'https://koji'
        subject, body = p._render_mail(autorebuild, False, auto_cancel, manual_cancel)

        # Submitter is updated in _get_receivers_list
        try:
            p._get_receivers_list()
        except Exception:
            pass
        if to_koji_submitter:
            # Re-render so the body picks up the freshly resolved submitter.
            subject, body = p._render_mail(autorebuild, False, auto_cancel, manual_cancel)

        status = 'Canceled' if auto_cancel or manual_cancel else 'Failed'
        exp_subject = '%s building image foo/bar:baz' % status
        exp_body = [
            'Image: foo/bar:baz',
            'Status: ' + status,
            'Submitted by: ',
            'Logs: '
        ]
        if autorebuild:
            exp_body[2] += '<autorebuild>'
        elif to_koji_submitter:
            exp_body[2] += MOCK_KOJI_SUBMITTER_EMAIL
        else:
            exp_body[2] += SendMailPlugin.DEFAULT_SUBMITTER
        if has_koji_logs:
            exp_body[3] += "https://koji/work/tasks/12345"
        else:
            exp_body[3] += "https://something.com/builds/blablabla/log"

        assert subject == exp_subject
        assert body == '\n'.join(exp_body)
@pytest.mark.parametrize(
'has_koji_config, has_addit_address, to_koji_submitter, to_koji_pkgowner, expected_receivers', [ # noqa
(True, True, True, True,
[MOCK_ADDITIONAL_EMAIL, MOCK_KOJI_OWNER_EMAIL, MOCK_KOJI_SUBMITTER_EMAIL]),
(True, False, True, True, [MOCK_KOJI_OWNER_EMAIL, MOCK_KOJI_SUBMITTER_EMAIL]),
(True, False, True, False, [MOCK_KOJI_SUBMITTER_EMAIL]),
(True, False, False, True, [MOCK_KOJI_OWNER_EMAIL]),
(True, True, False, False, [MOCK_ADDITIONAL_EMAIL]),
(True, False, False, False, []),
(False, False, False, False, []),
(False, True, False, True, [MOCK_ADDITIONAL_EMAIL]),
(False, True, True, False, [MOCK_ADDITIONAL_EMAIL]),
])
@pytest.mark.parametrize('use_import', [
(True, False)
])
def test_recepients_from_koji(self, monkeypatch,
has_addit_address,
has_koji_config, to_koji_submitter, to_koji_pkgowner,
expected_receivers, use_import):
class TagConf(object):
unique_images = []
class WF(object):
image = util.ImageName.parse('foo/bar:baz')
openshift_build_selflink = '/builds/blablabla'
build_process_failed = False
tag_conf = TagConf()
if use_import:
exit_results = {
KojiImportPlugin.key: MOCK_KOJI_BUILD_ID,
}
else:
exit_results = {
KojiPromotePlugin.key: MOCK_KOJI_BUILD_ID,
}
monkeypatch.setenv("BUILD", json.dumps({
'metadata': {
'labels': {
'koji-task-id': MOCK_KOJI_TASK_ID,
},
}
}))
session = MockedClientSession('', has_kerberos=True)
flexmock(koji, ClientSession=lambda hub, opts: session, PathInfo=MockedPathInfo)
kwargs = {
'url': 'https://something.com',
'smtp_host': 'smtp.bar.com',
'from_address': 'foo@bar.com',
'to_koji_submitter': to_koji_submitter,
'to_koji_pkgowner': to_koji_pkgowner,
'email_domain': MOCK_EMAIL_DOMAIN
}
if has_addit_address:
kwargs['additional_addresses'] = [MOCK_ADDITIONAL_EMAIL]
if has_koji_config:
kwargs['koji_hub'] = ''
kwargs['koji_proxyuser'] = None
kwargs['koji_ssl_certs_dir'] = '/certs'
kwargs['koji_krb_principal'] = None
kwargs['koji_krb_keytab'] = None
p = SendMailPlugin(None, WF(), **kwargs)
if not expected_receivers:
with pytest.raises(RuntimeError):
p._get_receivers_list()
else:
receivers = p._get_receivers_list()
assert sorted(receivers) == sorted(expected_receivers)
    @pytest.mark.parametrize('has_kerberos, expected_receivers', [
        (True, [MOCK_KOJI_OWNER_EMAIL, MOCK_KOJI_SUBMITTER_EMAIL]),
        (False, [MOCK_KOJI_OWNER_GENERATED, MOCK_KOJI_SUBMITTER_GENERATED])])
    def test_generated_email(self, monkeypatch, has_kerberos, expected_receivers):
        """Without a kerberos principal in koji, addresses are generated as
        <name>@<email_domain>."""
        class TagConf(object):
            unique_images = []

        class WF(object):
            image = util.ImageName.parse('foo/bar:baz')
            openshift_build_selflink = '/builds/blablabla'
            build_process_failed = False
            tag_conf = TagConf()
            exit_results = {
                KojiPromotePlugin.key: MOCK_KOJI_BUILD_ID
            }

        monkeypatch.setenv("BUILD", json.dumps({
            'metadata': {
                'labels': {
                    'koji-task-id': MOCK_KOJI_TASK_ID,
                },
            }
        }))

        session = MockedClientSession('', has_kerberos=has_kerberos)
        flexmock(koji, ClientSession=lambda hub, opts: session, PathInfo=MockedPathInfo)

        kwargs = {
            'url': 'https://something.com',
            'smtp_host': 'smtp.bar.com',
            'from_address': 'foo@bar.com',
            'to_koji_submitter': True,
            'to_koji_pkgowner': True,
            'email_domain': MOCK_EMAIL_DOMAIN,
            'koji_hub': '',
            'koji_proxyuser': None,
            'koji_ssl_certs_dir': '/certs',
            'koji_krb_principal': None,
            'koji_krb_keytab': None
        }
        p = SendMailPlugin(None, WF(), **kwargs)
        receivers = p._get_receivers_list()
        assert sorted(receivers) == sorted(expected_receivers)

        # The submitter attribute is resolved as a side effect of
        # _get_receivers_list().
        if has_kerberos:
            assert p.submitter == MOCK_KOJI_SUBMITTER_EMAIL
        else:
            assert p.submitter == MOCK_KOJI_SUBMITTER_GENERATED
    @pytest.mark.parametrize('exception_location, expected_receivers', [
        ('koji_connection', []),
        ('submitter', [MOCK_KOJI_OWNER_EMAIL]),
        ('empty_submitter', [MOCK_KOJI_OWNER_EMAIL]),
        ('owner', [MOCK_KOJI_SUBMITTER_EMAIL]),
        ('empty_owner', [MOCK_KOJI_SUBMITTER_EMAIL]),
        ('empty_email_domain', [])])
    def test_koji_recepients_exception(self, monkeypatch, exception_location, expected_receivers):
        """A failure while querying koji drops only the affected receiver;
        losing every receiver raises RuntimeError."""
        class TagConf(object):
            unique_images = []

        # Simulate missing build/task metadata for the 'empty_*' cases.
        if exception_location == 'empty_owner':
            koji_build_id = None
        else:
            koji_build_id = MOCK_KOJI_BUILD_ID

        if exception_location == 'empty_submitter':
            koji_task_id = None
        else:
            koji_task_id = MOCK_KOJI_TASK_ID

        class WF(object):
            image = util.ImageName.parse('foo/bar:baz')
            openshift_build_selflink = '/builds/blablabla'
            build_process_failed = False
            tag_conf = TagConf()
            exit_results = {
                KojiPromotePlugin.key: koji_build_id
            }

        monkeypatch.setenv("BUILD", json.dumps({
            'metadata': {
                'labels': {
                    'koji-task-id': koji_task_id,
                },
            }
        }))

        # Without an email domain, generated addresses cannot be built.
        has_kerberos = exception_location != 'empty_email_domain'
        session = MockedClientSession('', has_kerberos=has_kerberos)
        if exception_location == 'koji_connection':
            (flexmock(session)
                .should_receive('ssl_login')
                .and_raise(RuntimeError, "xyz"))
        elif exception_location == 'submitter':
            (flexmock(session)
                .should_receive('getTaskInfo')
                .and_raise(RuntimeError, "xyz"))
        elif exception_location == 'owner':
            (flexmock(session)
                .should_receive('getPackageConfig')
                .and_raise(RuntimeError, "xyz"))

        flexmock(koji, ClientSession=lambda hub, opts: session, PathInfo=MockedPathInfo)
        kwargs = {
            'url': 'https://something.com',
            'smtp_host': 'smtp.bar.com',
            'from_address': 'foo@bar.com',
            'to_koji_submitter': True,
            'to_koji_pkgowner': True,
            'koji_hub': '',
            'koji_proxyuser': None,
            'koji_ssl_certs_dir': '/certs',
            'koji_krb_principal': None,
            'koji_krb_keytab': None
        }
        if exception_location != 'empty_email_domain':
            kwargs['email_domain'] = MOCK_EMAIL_DOMAIN
        p = SendMailPlugin(None, WF(), **kwargs)

        if not expected_receivers:
            with pytest.raises(RuntimeError):
                p._get_receivers_list()
        else:
            receivers = p._get_receivers_list()
            assert sorted(receivers) == sorted(expected_receivers)
    @pytest.mark.parametrize('throws_exception', [False, True])
    def test_send_mail(self, throws_exception):
        """_send_mail() delivers via smtplib and propagates SMTP errors."""
        class WF(object):
            exit_results = {}

        p = SendMailPlugin(None, WF(), from_address='foo@bar.com', smtp_host='smtp.spam.com')

        class SMTP(object):
            def sendmail(self, from_addr, to, msg):
                pass

            def quit(self):
                pass

        smtp_inst = SMTP()
        flexmock(smtplib).should_receive('SMTP').and_return(smtp_inst)
        sendmail_chain = (flexmock(smtp_inst).should_receive('sendmail').
                          with_args('foo@bar.com', ['spam@spam.com'], str))
        if throws_exception:
            sendmail_chain.and_raise(smtplib.SMTPException, "foo")
        # quit() is expected to be called even when sendmail raises.
        flexmock(smtp_inst).should_receive('quit')

        if throws_exception:
            with pytest.raises(SMTPException) as e:
                p._send_mail(['spam@spam.com'], 'subject', 'body')
            assert str(e.value) == 'foo'
        else:
            p._send_mail(['spam@spam.com'], 'subject', 'body')
    def test_run_ok(self):
        """Happy path: _should_send is True, receivers resolve, mail is sent."""
        class TagConf(object):
            unique_images = []

        class WF(object):
            autorebuild_canceled = False
            build_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = util.ImageName.parse('repo/name')
            build_process_failed = True
            tag_conf = TagConf()
            exit_results = {}

        receivers = ['foo@bar.com', 'x@y.com']
        p = SendMailPlugin(None, WF(),
                           from_address='foo@bar.com', smtp_host='smtp.spam.com',
                           send_on=[AF])

        (flexmock(p).should_receive('_should_send')
            .with_args(True, False, False, False).and_return(True))
        flexmock(p).should_receive('_get_receivers_list').and_return(receivers)
        flexmock(p).should_receive('_send_mail').with_args(receivers, six.text_type, six.text_type)
        p.run()
    def test_run_fails_to_obtain_receivers(self):
        """If receiver lookup fails, the notification goes to error_addresses
        instead."""
        class TagConf(object):
            unique_images = []

        class WF(object):
            autorebuild_canceled = False
            build_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = util.ImageName.parse('repo/name')
            build_process_failed = True
            tag_conf = TagConf()
            exit_results = {}

        error_addresses = ['error@address.com']
        p = SendMailPlugin(None, WF(),
                           from_address='foo@bar.com', smtp_host='smtp.spam.com',
                           send_on=[AF], error_addresses=error_addresses)

        (flexmock(p).should_receive('_should_send')
            .with_args(True, False, False, False).and_return(True))
        flexmock(p).should_receive('_get_receivers_list').and_raise(RuntimeError())
        flexmock(p).should_receive('_send_mail').with_args(error_addresses, six.text_type,
                                                           six.text_type)
        p.run()
    def test_run_does_nothing_if_conditions_not_met(self):
        """When _should_send is False, neither receivers nor mail are touched."""
        class WF(object):
            autorebuild_canceled = False
            build_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = util.ImageName.parse('repo/name')
            build_process_failed = True
            exit_results = {}

        p = SendMailPlugin(None, WF(),
                           from_address='foo@bar.com', smtp_host='smtp.spam.com',
                           send_on=[MS])

        (flexmock(p).should_receive('_should_send')
            .with_args(True, False, False, False).and_return(False))
        flexmock(p).should_receive('_get_receivers_list').times(0)
        flexmock(p).should_receive('_send_mail').times(0)
        p.run()
| |
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import os
import sys
import random
from abc import ABCMeta, abstractmethod
from ..auxiliary.datastructs import Point2D
from ..tools.configuration import ConfigMgr
from . import Environment
__all__ = ['Cell', 'GridWorld']
# BUG FIX: the class previously set ``__metaclass__ = ABCMeta``, which is
# honored only by Python 2; on Python 3 it is ignored and the
# @abstractmethod check was silently disabled.  Deriving from a class
# created by calling ``ABCMeta(...)`` works on both versions (this is the
# same trick ``six.with_metaclass`` uses), matching this module's py2/py3
# intent (``six.moves``, ``__future__`` imports).
class Cell(ABCMeta('CellBase', (object,), {})):
    """The abstract cell module.

    A cell is a base unit in a 2d-grid. The :class:`GridWorld` is composed
    of cells.

    Parameters
    ----------
    x : int
        The x-position of the cell.
    y : int
        The y-position of the cell.
    func : callable
        A callback function to find the neighboring cells.

    Notes
    -----
    Every class inheriting from Cell must implement :meth:`is_occupied`.
    """

    @property
    def x(self):
        """int : The x-position of the cell."""
        return self._x

    @property
    def y(self):
        """int : The y-position of the cell."""
        return self._y

    @property
    def neighbors(self):
        """list[Point2D] : The cell's neighbors (unreachable ones omitted)."""
        return self._neighbors

    def __init__(self, x, y, func):
        # Payload stored in the cell (a single character in GridWorld files).
        self.data = None
        self._x = x
        self._y = y
        self._find_neighbors(func)

    @abstractmethod
    def is_occupied(self):
        """Determine if the cell is occupied.

        Returns
        -------
        bool :
            Whether the cell is occupied.

        Raises
        ------
        NotImplementedError
            If the child class does not implement this function.
        """
        raise NotImplementedError

    def _find_neighbors(self, func):
        """Find the neighboring cells.

        ``func`` is called once per compass direction (0..3) and may return
        ``None`` for moves that would leave the grid; those are dropped.
        """
        candidates = [func(self, direction) for direction in range(4)]
        self._neighbors = [coord for coord in candidates if coord is not None]
class GridWorld(Environment):
    """A gridworld consisting of a 2d-grid.

    A gridworld's basic unit is a cell. Each cell has four
    neighbors corresponding to the actions the agent can take
    in the four compass directions (N, S, E, W).

    Parameters
    ----------
    width : int
        The number of cells in the x-direction.
    height : int
        The number of cells in the y-direction.
    agents : Agent or list[Agent]
        A list of agents that act in the gridworld.
    filename : str
        The name of the file containing the configuration of
        the gridworld.

    Notes
    -----
    Within the gridworld, the agent's location is denoted by `o`.
    """

    @property
    def width(self):
        """int : The number of cells in the x-direction."""
        return self._width

    @property
    def height(self):
        """int : The number of cells in the y-direction."""
        return self._height

    def __init__(self, width=20, height=20, agents=None, filename=None):
        super(GridWorld, self).__init__(agents)

        self._width = width
        self._height = height
        self._grid = []           # :type: list[list[Cell]] -- indexed [y][x]
        self._config_mgr = None   # :type: ConfigMgr -- set by load()

        if filename is not None:
            self.load(filename)

    def __str__(self):
        """Render the grid as ASCII art: one `column`-wide field per cell,
        rows separated by dashed lines."""
        column = 3
        # Each cell occupies `column` characters plus one divider character.
        spaces = (column + 1) * len(self._grid[0])

        parts = ['-' * spaces]
        for row in self._grid:
            parts.append('|'.join(self._columnize(cell.data, column, 'Center')
                                  for cell in row) + '|')
            parts.append('-' * spaces)
        return '\n'.join(parts) + '\n'

    def reset(self, t, **kwargs):
        """Reset the agent's state.

        Parameters
        ----------
        t : float
            The current time (sec).
        kwargs : dict, optional
            Non-positional parameters.
        """
        super(GridWorld, self).reset(t, **kwargs)
        # Rebuilding the grid discards all cell data.
        self._initialize()

    def update(self, dt):
        """Update the agents.

        Parameters
        ----------
        dt : float
            The elapsed time (sec)
        """
        super(GridWorld, self).update(dt)

        for agent in self._agents:
            agent.update(dt)

    def make_cell(self, x, y):
        """Create the new cell.

        Parameters
        ----------
        x : int
            The x-coordinate within the gridworld.
        y : int
            The y-coordinate within the gridworld.

        Returns
        -------
        Cell :
            The created cell.

        Notes
        -----
        NOTE(review): ``Cell`` declares ``is_occupied`` abstract, so concrete
        gridworlds presumably override this factory -- confirm with callers.
        """
        return Cell(x, y, self.move_coords)

    def load(self, f):
        """Load the world from file.

        If a ``<base>.config`` file with the same base name exists, its
        configuration is loaded as well.

        Parameters
        ----------
        f : str or file
            The file instance or the filename.
        """
        # BUG FIX: the previous guard ``assert(f is file or isinstance(f,
        # basestring))`` compared `f` with the *type* ``file`` (always False
        # for an open handle) and relied on Python-2-only names; dispatch on
        # the argument kind instead.
        if isinstance(f, str):
            with open(f) as fh:
                data = fh.readlines()
            # Load the accompanying config file if the world file exists.
            if os.path.isfile(f):
                base = os.path.splitext(f)[0]
                self._config_mgr = ConfigMgr(base + ".config")
        else:
            data = f.readlines()
            f.close()

        data = [line.rstrip('\n') for line in data]

        self._height = len(data)
        self._width = max(len(row) for row in data)
        self._initialize()

        for y in range(self._height):
            # Rows shorter than the widest row leave the remaining cells
            # with their default data.
            for x in range(min(self._width, len(data[y]))):
                self._grid[y][x].data = data[y][x]

    def save(self, f=None):
        """Save the world to file.

        Parameters
        ----------
        f : str or file
            The file instance or the filename.
        """
        total = "".join(
            "".join(self._grid[y][x].data for x in range(self.width)) + "\n"
            for y in range(self._height))

        # BUG FIX: as in load(), accept a filename or an open file object
        # (the old ``f is file`` assertion rejected real file handles).
        if isinstance(f, str):
            with open(f, "w") as fh:
                fh.write(total)
        else:
            f.write(total)
            f.close()

    def get_cell(self, loc):
        """Return the cell based on its x/y-coordinates.

        Parameters
        ----------
        loc : Point2D
            The x-/y-coordinates of the cell.

        Returns
        -------
        Cell :
            The cell at the specified location.
        """
        # NOTE(review): negative coordinates silently wrap (Python indexing)
        # and an out-of-range access terminates the process via sys.exit;
        # kept as-is for backward compatibility.
        try:
            return self._grid[loc.y][loc.x]
        except Exception as e:
            sys.exit(e)

    def find_cells(self, data):
        """Find the cells containing given data.

        Parameters
        ----------
        data : str
            The data to match the cell to.

        Returns
        -------
        list[Cell] :
            All cells that contain the specified data.
        """
        return [c for row in self._grid for c in row if c.data == data]

    def move_coords(self, cell, move):
        """Return the coordinates of the neighboring cell the agent
        transitions to following the given move.

        Parameters
        ----------
        cell : Cell
            The current cell.
        move : int
            The action performed (0..3, one per compass direction).

        Returns
        -------
        Point2D :
            The x-/y-coordinates of the resulting cell, or ``None`` if the
            move would leave the grid.
        """
        # (dx, dy) offsets for the four compass directions.
        dx, dy = [(0, -1), (1, 0), (0, 1), (-1, 0)][move]
        coords = Point2D(cell.x + dx, cell.y + dy)
        if not (0 <= coords.x < self._width and 0 <= coords.y < self._height):
            return None
        return coords

    def random_location(self):
        """Find a random unoccupied location within the grid.

        Returns
        -------
        Point2D :
            The random x-/y-coordinates.

        Notes
        -----
        Loops until an unoccupied cell is drawn; a completely occupied grid
        would make this spin forever.
        """
        while True:
            loc = Point2D(random.randrange(self._width),
                          random.randrange(self._height))
            if not self.get_cell(loc).is_occupied():
                return loc

    def set_start_loc(self, loc):
        """Set the agent's starting location.

        Parameters
        ----------
        loc : Point2D
            The x-/y-coordinates the agent starts out in.
        """
        # Remove any previous agent marker before placing the new one.
        for c in self.find_cells('o'):
            c.data = ''
        self._grid[loc.y][loc.x].data = 'o'

    def _initialize(self):
        """(Re)build the grid as height x width freshly created cells."""
        self._grid = [[self.make_cell(i, j) for i in range(self._width)]
                      for j in range(self._height)]

    # noinspection PyMethodMayBeStatic
    def _columnize(self, word, width, align='Left'):
        """Create a column from a string.

        Parameters
        ----------
        word : str
            The string to be processed.
        width : int
            The width of the column.
        align : {'Left', 'Right', 'Center'}
            The column alignment.

        Returns
        -------
        str :
            Columnized string.
        """
        nspaces = max(width - len(word), 0)
        if align == 'Left':
            return word + " " * nspaces
        if align == 'Right':
            return " " * nspaces + word
        # BUG FIX: with ``from __future__ import division`` in effect,
        # ``nspaces / 2`` is a float and ``" " * float`` raised TypeError on
        # this (Center) path, which __str__ exercises; use floor division.
        left = nspaces // 2
        return " " * left + word + " " * (nspaces - left)
| |
#!/usr/bin/env python
"""
Train overcomplete ICA on van Hateren image patches.
"""
import sys
sys.path.append('./code')
from models import ISA, MoGaussian, StackedModel, ConcatModel, Distribution
from tools import preprocess, Experiment, mapp, imsave, imformat, stitch
from transforms import LinearTransform, WhiteningTransform
from numpy import seterr, sqrt, dot, load, hstack, eye
from numpy.random import rand
# controls parallelization
mapp.max_processes = 8

# controls how much information is printed during training
Distribution.VERBOSITY = 2

# Hyperparameter sets, selected by the <param_id> command-line argument.
# Columns: PS = patch size, OC = overcompleteness, TI = training iterations,
# FI = fine-tuning iterations, LP = optimize marginal distributions,
# SC = initialize with sparse coding (legend printed by main()).
# PS, OC, TI, FI, LP, SC
parameters = [
    # complete models
    ['8x8', 1, 20, 10, True, False],
    ['16x16', 1, 30, 15, True, False],
    # overcomplete models
    ['8x8', 2, 1000, 100, True, False],
    ['16x16', 2, 1000, 100, True, False],
    # overcomplete models with Laplace marginals
    ['8x8', 2, 200, 100, False, False],
    ['8x8', 2, 100, 100, False, True],
    ['16x16', 2, 200, 100, False, True],
    # initialize with sparse coding
    ['8x8', 2, 100, 100, True, True],
    ['8x8', 3, 100, 100, True, True],
    ['8x8', 4, 100, 200, True, True],
    # NOTE(review): the next three entries are identical -- possibly meant to
    # be repeated runs with different seeds; confirm before deduplicating.
    ['16x16', 2, 50, 100, True, True],
    ['16x16', 2, 50, 100, True, True],
    ['16x16', 2, 50, 100, True, True],
    ['8x8', 1, 20, 10, True, False],
]
def main(argv):
	"""
	Train one van Hateren ISA experiment selected by argv[1].

	argv[1] indexes the module-level `parameters` table; the optional
	argv[2] names a stored experiment whose model is used for
	initialization instead of training from scratch.  Returns 0.
	"""
	if len(argv) < 2:
		# no parameter id given: print the table of available settings
		print 'Usage:', argv[0], '<param_id>', '[experiment]'
		print
		print ' {0:>3} {1:>7} {2:>5} {3:>5} {4:>5} {5:>5} {6:>5}'.format(
			'ID', 'PS', 'OC', 'TI', 'FI', 'LP', 'SC')
		for id, params in enumerate(parameters):
			print ' {0:>3} {1:>7} {2:>5} {3:>5} {4:>5} {5:>5} {6:>5}'.format(id, *params)
		print
		print ' ID = parameter set'
		print ' PS = patch size'
		print ' OC = overcompleteness'
		print ' TI = number of training iterations'
		print ' FI = number of fine-tuning iterations'
		print ' LP = optimize marginal distributions'
		print ' SC = initialize with sparse coding'
		return 0
	# fail fast on any numerical problem during training
	seterr(invalid='raise', over='raise', divide='raise')
	# start experiment
	experiment = Experiment()
	# hyperparameters
	patch_size, \
	overcompleteness, \
	max_iter, \
	max_iter_ft, \
	train_prior, \
	sparse_coding = parameters[int(argv[1])]
	### DATA PREPROCESSING
	# load data, log-transform and center data
	data = load('data/vanhateren.{0}.1.npz'.format(patch_size))['data']
	data = data[:, :100000]
	data = preprocess(data)
	# discrete cosine transform and whitening transform
	dct = LinearTransform(dim=int(sqrt(data.shape[0])), basis='DCT')
	wt = WhiteningTransform(dct(data)[1:], symmetric=True)
	### MODEL DEFINITION
	# one dimension is reserved for the DC component, hence the -1
	isa = ISA(num_visibles=data.shape[0] - 1,
		num_hiddens=data.shape[0] * overcompleteness - 1, ssize=1)
	# model DC component with a mixture of Gaussians
	model = StackedModel(dct,
		ConcatModel(MoGaussian(20), StackedModel(wt, isa)))
	### MODEL TRAINING
	# variables to store in results
	experiment['model'] = model
	experiment['parameters'] = parameters[int(argv[1])]
	def callback(phase, isa, iteration):
		"""
		Saves intermediate results every few iterations.
		"""
		if not iteration % 5:
			# whitened filters
			A = dot(dct.A[1:].T, isa.A)
			patch_size = int(sqrt(A.shape[0]) + 0.5)
			# save intermediate results
			experiment.save('results/vanhateren.{0}/results.{1}.{2}.xpck'.format(argv[1], phase, iteration))
			# visualize basis
			imsave('results/vanhateren.{0}/basis.{1}.{2:0>3}.png'.format(argv[1], phase, iteration),
				stitch(imformat(A.T.reshape(-1, patch_size, patch_size))))
	if len(argv) > 2:
		# initialize model with trained model
		results = Experiment(argv[2])
		model = results['model']
		isa = model.model[1].model
		dct = model.transforms[0]
		experiment['model'] = model
	else:
		# enable regularization of marginals
		for gsm in isa.subspaces:
			gsm.gamma = 1e-3
			gsm.alpha = 2.
			gsm.beta = 1.
		# train mixture of Gaussians on DC component
		model.train(data, 0, max_iter=100)
		# initialize filters and marginals
		model.initialize(data, 1)
		model.initialize(model=1, method='laplace')
	experiment.progress(10)
	if sparse_coding:
		# initialize with sparse coding
		if patch_size == '16x16':
			model.train(data, 1,
				method=('of', {
					'max_iter': max_iter,
					'noise_var': 0.05,
					'var_goal': 1.,
					'beta': 10.,
					'step_width': 0.01,
					'sigma': 0.3,
				}),
				callback=lambda isa, iteration: callback(0, isa, iteration))
		else:
			model.train(data, 1,
				method=('of', {
					'max_iter': max_iter,
					'noise_var': 0.1,
					'var_goal': 1.,
					'beta': 10.,
					'step_width': 0.01,
					'sigma': 0.5,
				}),
				callback=lambda isa, iteration: callback(0, isa, iteration))
		isa.orthogonalize()
	else:
		if patch_size == '16x16':
			# prevents out-of-memory
			mapp.max_processes = 1
		# train model using a subset of the data
		model.train(data[:, :20000], 1,
			max_iter=max_iter,
			train_prior=train_prior,
			persistent=True,
			init_sampling_steps=5,
			method=('sgd', {'momentum': 0.8}),
			callback=lambda isa, iteration: callback(0, isa, iteration),
			sampling_method=('gibbs', {'num_steps': 1}))
	experiment.progress(50)
	if patch_size == '16x16':
		# prevents out-of-memory
		mapp.max_processes = 1
	# disable regularization
	for gsm in isa.subspaces:
		gsm.gamma = 0.
	# fine-tune model using all the data
	model.train(data, 1,
		max_iter=max_iter_ft,
		train_prior=train_prior,
		train_subspaces=False,
		persistent=True,
		init_sampling_steps=10 if not len(argv) > 2 and (sparse_coding or not train_prior) else 50,
		method=('lbfgs', {'max_fun': 50}),
		callback=lambda isa, iteration: callback(1, isa, iteration),
		sampling_method=('gibbs', {'num_steps': 2}))
	experiment.save('results/vanhateren/vanhateren.{0}.{{0}}.{{1}}.xpck'.format(argv[1]))
	return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
	sys.exit(main(sys.argv))
| |
#
# Copyright (C) 2013 Comcast Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import urllib2
import urlparse
from lxml import etree
def _normalize_whitespace(s):
return ' '.join(s.split())
def _extract_text_help(root, acc):
    """Append normalized text fragments found under *root* to *acc*.

    Walks the element depth-first, collecting element text plus the tail
    text that follows each child.  Returns the (mutated) accumulator."""
    head = root.text
    if head is not None and head.strip():
        acc.append(_normalize_whitespace(head.strip()))
    for child in root.getchildren():
        acc = _extract_text_help(child, acc)
        tail = child.tail
        if tail is not None and tail.strip():
            acc.append(_normalize_whitespace(tail.strip()))
    return acc
def _extract_text(root):
    """Return all human-readable text under *root* as one normalized string."""
    fragments = _extract_text_help(root, [])
    return ' '.join(fragments)
def _extract(elt, doc):
    """This function takes a given DOM node 'elt' and attempts to interpret
    it as a Python value of some sort (possibly an object)."""
    # Anything carrying microdata markup becomes a MicrodataObject.
    if 'itemtype' in elt.attrib or 'itemscope' in elt.attrib:
        return MicrodataObject(elt, doc)
    tag = elt.tag
    if tag == 'a' and 'href' in elt.attrib:
        href = elt.attrib['href']
        if href.startswith('#'):
            # same-document reference: resolve the fragment locally
            target = doc._doc.getroot().find(".//*[@id='%s']" % href[1:])
            if target is not None: return _extract(target, doc)
        else:
            # remote reference: fetch the linked document (fragment stripped)
            # and extract the value from there
            up = urlparse.urlparse(href)
            remote_doc = enter(urlparse.urlunparse((up.scheme, up.netloc, up.path, up.params, up.query, '')))
            if up.fragment:
                target = remote_doc._doc.getroot().find(".//*[@id='%s']" % up.fragment)
                if target is not None: return _extract(target, remote_doc)
            if len(remote_doc.objects) == 1: return remote_doc.objects[0]
            return _extract(remote_doc._doc.getroot(), remote_doc)
    if tag == 'img': return elt.attrib['src']
    # fall back to the element's normalized text content
    return _extract_text(elt)
def _value_of(doc, fragment=''):
    """Return the most useful value for *doc*: the element identified by
    *fragment* when given; else the single top-level object, the list of
    top-level objects, or the extracted content of the document root."""
    if fragment:
        target = doc._doc.getroot().find(".//*[@id='%s']" % fragment)
        if target is not None: return _extract(target, doc)
    if len(doc.objects) == 1: return doc.objects[0]
    if len(doc.objects) > 0: return doc.objects
    return _extract(doc._doc.getroot(), doc)
class Link(object):
    """Links are basically a representation of HTML <a> tags. The main
    thing you can do with a Link is to follow it."""
    def __init__(self, elt, doc):
        # elt: the <a> element; doc: the MicrodataDocument containing it
        self._elt = elt
        self._doc = doc
    def __repr__(self):
        return "<Link %s at 0x%x>" % (self._elt.attrib['href'], id(self))
    def follow(self):
        """Dereference the link and return the value it points at."""
        href = self._elt.attrib['href']
        resolved = urlparse.urljoin(self._doc._url, href)
        up = urlparse.urlparse(resolved)
        # base URL without the fragment, used to decide local vs. remote
        resolved_base = urlparse.urlunparse((up.scheme, up.netloc, up.path,
                                             up.params, up.query, ''))
        if resolved_base == self._doc._url:
            # local
            return _value_of(self._doc, up.fragment)
        else:
            # remote
            remote_doc = enter(resolved_base)
            return _value_of(remote_doc, up.fragment)
class Form(object):
    """Forms are a representation of an HTML <form> tag. Then main thing
    you can do with a form is to 'submit' one by providing a dictionary
    of key-value pairs corresponding to the values to supply to the form's
    <input> elements. N.B. This is not fully implemented per the HTML spec,
    as we only support <input> and not, for example, <textarea> or <select>
    at this point. The other useful thing you can do with a Form is to ask
    it for its .params field, which returns a list of the input names
    provided."""
    def __init__(self, elt, doc):
        # elt: the <form> element; doc: the MicrodataDocument containing it
        self._elt = elt
        self._doc = doc
    def __repr__(self):
        if 'data-rel' not in self._elt.attrib:
            return "<Form at 0x%x>" % id(self)
        return "<Form %s at 0x%x>" % (self._elt.attrib['data-rel'], id(self))
    def _set_value_for(self, elt, args, params):
        # Fill params[name] from caller-supplied args, falling back to the
        # element's default @value, then to the empty string.
        if 'name' not in elt.attrib: return
        name = elt.attrib['name']
        if name in args:
            params[name] = args[name]
        else:
            if 'value' in elt.attrib:
                params[name] = elt.attrib['value']
            else:
                params[name] = ""
    def _get_params(self):
        # Names of user-facing (non-hidden) <input> elements.
        out = []
        for elt in self._elt.findall(".//input"):
            if 'type' in elt.attrib and elt.attrib['type'] == 'hidden':
                continue
            if 'name' in elt.attrib: out.append(elt.attrib['name'])
        return out
    params = property(_get_params)
    def _build_params(self, args):
        # URL-encode values for every <textarea> and <input> in the form.
        params = {}
        for elt in self._elt.findall(".//textarea"):
            self._set_value_for(elt, args, params)
        for elt in self._elt.findall(".//input"):
            self._set_value_for(elt, args, params)
        return urllib.urlencode(params)
    def submit(self, args={}):
        """Submit the form, with *args* supplying <input> values by name."""
        action = urlparse.urljoin(self._doc._url, self._elt.attrib['action'])
        params = self._build_params(args)
        if 'method' not in self._elt.attrib or self._elt.attrib['method'] == 'GET':
            up = urlparse.urlparse(action)
            # NOTE(review): `up.params` holds path parameters, not the query
            # string — `up.query` looks intended here.  Also note urlunparse
            # below receives `allparams` in the query slot.  Confirm intent
            # before changing.
            if up.params: allparams = "%s&%s" % (up.params, params)
            else: allparams = params
            where = urlparse.urlunparse((up.scheme, up.netloc, up.path,
                                         up.params, allparams, ''))
            return enter(where)
        else:
            print "POST", action, "...",
            f = urllib2.urlopen(action, params)
            print "OK"
            return MicrodataDocument(f, action)
class MicrodataObject(object):
    """This represents a particular semantic object, i.e. something identified
    by an @itemscope attribute. MicrodataObjects have several useful properties
    besides their actual semantic @itemprop properties:
    .props = return names of (local) microdata @itemprop properties
    .itemtype = return the @itemtype of this object
    .links = return a list of Link objects contained by this object
    .forms = return a list of Form objects contained by this object
    There is also a shortcut method .submit() that will submit the first
    contained form with the given link relation (as notated by the @data-rel
    attribute)."""
    def __init__(self, root, doc):
        self._root = root
        self._doc = doc
        # property/link/form caches are built lazily on first access
        self._propmap = None
        self._linkmap = None
        self._formmap = None
        self._orphan_forms = None
    def __repr__(self):
        t = self.itemtype
        if t is None: return "<untyped at 0x%x>" % id(self)
        return "<%s at 0x%x>" % (self.itemtype, id(self))
    def _dfs_build_help(self, elt):
        # Collect @itemprop elements, stopping at nested @itemscope objects.
        if 'itemprop' in elt.attrib:
            prop = elt.attrib['itemprop']
            if prop not in self._propmap: self._propmap[prop] = []
            self._propmap[prop].append(elt)
        if 'itemscope' in elt.attrib: return
        for child in elt.getchildren():
            self._dfs_build_help(child)
    def _dfs_form_help(self, elt):
        # Collect forms, grouped by @data-rel; unannotated forms are orphans.
        if elt.tag == 'form':
            if 'data-rel' in elt.attrib:
                rel = elt.attrib['data-rel']
                if rel not in self._formmap: self._formmap[rel] = []
                self._formmap[rel].append(Form(elt, self._doc))
            else:
                self._orphan_forms.append(Form(elt, self._doc))
        if 'itemscope' in elt.attrib: return
        for child in elt.getchildren():
            self._dfs_form_help(child)
    def _build_formmap(self):
        self._formmap = {}
        self._orphan_forms = []
        for child in self._root.getchildren():
            self._dfs_form_help(child)
    def _dfs_link_help(self, elt):
        # Collect <a rel=...> links, stopping at nested @itemscope objects.
        if elt.tag == 'a' and 'rel' in elt.attrib:
            rel = elt.attrib['rel']
            if rel not in self._linkmap: self._linkmap[rel] = []
            self._linkmap[rel].append(Link(elt, self._doc))
        if 'itemscope' in elt.attrib: return
        for child in elt.getchildren():
            self._dfs_link_help(child)
    def _build_linkmap(self):
        self._linkmap = {}
        for child in self._root.getchildren():
            self._dfs_link_help(child)
    def _build_propmap(self):
        self._propmap = {}
        for child in self._root.getchildren():
            self._dfs_build_help(child)
    def _get_propmap(self):
        if self._propmap is None: self._build_propmap()
        return self._propmap
    def __len__(self): return self._get_propmap().__len__()
    def __contains__(self,x): return self._get_propmap().__contains__(x)
    def __iter__(self): return self._get_propmap().__iter__()
    def get_property(self, prop, raw=False, allow_multi=True):
        """Return the value(s) of @itemprop *prop*.

        If the property is absent locally, a 'self' link (when present) is
        followed and the lookup retried on the linked object.  With
        raw=True the underlying DOM elements are returned instead of
        extracted values; with allow_multi=False only the first value is
        returned."""
        propmap = self._get_propmap()
        if prop not in propmap:
            self_link = self.get_links("self", raw=False, allow_multi=False)
            if self_link is not None:
                alt = self_link.follow()
                if alt is not None and type(alt) == MicrodataObject:
                    return alt.get_property(prop, raw, allow_multi)
            return None
        vals = propmap[prop]
        if not raw:
            # NOTE: relies on Python 2 map() returning a list for len() below.
            vals = map(lambda v : _extract(v, self._doc), vals)
        if len(vals) == 0: return None
        if len(vals) == 1 or not allow_multi: return vals[0]
        return vals
    def get_props(self):
        return self._get_propmap().keys()
    props = property(get_props)
    def get_itemtype(self):
        if 'itemtype' not in self._root.attrib: return None
        return self._root.attrib['itemtype']
    itemtype = property(get_itemtype)
    def _get_linkmap(self):
        if self._linkmap is None: self._build_linkmap()
        return self._linkmap
    links = property(_get_linkmap)
    def _get_formmap(self):
        if self._formmap is None: self._build_formmap()
        return self._formmap
    forms = property(_get_formmap)
    def submit(self, rel, args):
        """Submit the first contained form annotated with @data-rel *rel*."""
        return self.forms[rel][0].submit(args)
    def get_links(self, rel, raw=False, allow_multi=True):
        """Return Link(s) with link relation *rel*, or None when absent.

        With raw=True the underlying <a> elements are returned instead."""
        linkmap = self._get_linkmap()
        if rel not in linkmap: return None
        links = linkmap[rel]
        if raw:
            return map(lambda l : l._elt, links)
        if len(links) == 0: return None
        if len(links) == 1 or not allow_multi: return links[0]
        # BUG FIX: this used to `return out`, an undefined name, raising
        # NameError whenever multiple links share the same relation.
        return links
    def __getitem__(self, name):
        return self.get_property(name, raw=False, allow_multi=False)
    def __getattr__(self, name):
        return self.get_property(name, raw=False, allow_multi=False)
class MicrodataDocument:
    """MicrodataDocuments represent a client application state, usually the
    result of evaluating an entry point via enter(), following a Link, or
    submitting a Form. Useful properties include:
    .forms = return all @data-rel annotated forms
    .allforms = return all <form> elements regardless of annotation
    .links = return all top-level Links (<a> tags, not <link> tags at the
        moment)
    .objects = returns all top-level MicrodataObjects (ones that are not
        enclosed by another MicrodataObject)
    Plus the following convenience methods:
    .follow(rel) = follow the first Link with the given link relation
    .submit(rel, args) = submit the first Form with the given link relation,
        using the 'args' dictionary to supply values for the input elements"""
    def __init__(self, f, url):
        # Parse the (possibly malformed) HTML stream into an element tree.
        parser = etree.HTMLParser()
        self._doc = etree.parse(f, parser)
        self._url = url
    def _dfs_help(self, root, acc):
        # Accumulate top-level @itemtype objects; do not descend into them.
        if 'itemtype' in root.attrib and 'itemprop' not in root.attrib:
            acc.append(MicrodataObject(root, self))
            return acc
        for child in root.getchildren():
            acc = self._dfs_help(child, acc)
        return acc
    def _get_forms(self):
        fake_obj = MicrodataObject(self._doc.getroot(), self)
        return fake_obj.forms
    forms = property(_get_forms)
    def _get_links(self):
        fake_obj = MicrodataObject(self._doc.getroot(), self)
        return fake_obj.links
    links = property(_get_links)
    def _get_orphan_forms(self):
        fake_obj = MicrodataObject(self._doc.getroot(), self)
        # BUG FIX: the form scan is lazy, so _orphan_forms stayed None unless
        # the form map had been built; force the scan before reading it.
        fake_obj._get_formmap()
        return fake_obj._orphan_forms
    orphan_forms = property(_get_orphan_forms)
    def _get_all_forms(self):
        return map(lambda elt : Form(elt, self),
                   self._doc.getroot().findall(".//form"))
    allforms = property(_get_all_forms)
    def follow(self, rel):
        """Follow the first link with link relation *rel*."""
        return self.links[rel][0].follow()
    def submit(self, rel, args):
        """Submit the first form annotated with @data-rel *rel*."""
        return self.forms[rel][0].submit(args)
    def get_toplevel_objects(self):
        return self._dfs_help(self._doc.getroot(), [])
    objects = property(get_toplevel_objects)
def enter(url):
    """Fetch *url* over HTTP and wrap the response in a MicrodataDocument."""
    print "GET", url, "...",
    f = urllib2.urlopen(url)
    print "OK"
    return MicrodataDocument(f, url)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to replicate model_fn's over local GPUs.
This file contains util that allow to replicate `Estimator.model_fn` over
GPUs. Replicated version of a `model_fn` is returned that can subsequently
be used with `Estimator`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import training_util
def replicate_model_fn(model_fn, optimizer_fn, devices=None):
  """Replicate `Estimator.model_fn` over GPUs within a single host.

  The given `model_fn` specifies a single forward pass of a model. To replicate
  such a model over GPUs, each GPU gets its own instance of the forward pass
  (a.k.a. a tower). The input features and labels get sharded into the chunks
  that correspond to the number of GPUs. Each tower computes its own loss based
  on its input. For each such loss, gradients are computed. After that, the
  available losses are summed to form aggregated loss. The available
  gradients are summed too. Then, they update weights using the specified
  optimizer.

  If `devices` are `None`, then all available GPUs are going to be used for
  replication. If no GPUs are available, then the model is going to be
  placed on the CPU.

  Two modes of local replication over available GPUs are supported:
    1) If exactly 1 GPU is detected, then variables and operations are placed
       onto GPU.
    2) If more than 1 GPU is detected, then variables are going to be placed on
       the CPU. Replicas of operations are placed on each individual GPU.

  Here is an example of how one might use their `model_fn` to run over GPUs:
    ```python
    def optimizer_fn():
      return tf.train.GradientDescentOptimizer(learning_rate=0.001)
    ...
    def model_fn(...):  # See `model_fn` in `Estimator`.
      loss = ...
      if mode == tf.estimator.ModeKeys.TRAIN:
        #  See the section below on `EstimatorSpec.train_op`.
        return EstimatorSpec(mode=mode, loss=loss, train_op=tf.noop())
      #  No change for `ModeKeys.EVAL` or `ModeKeys.PREDICT`.
      return EstimatorSpec(...)
    ...
    classifier = tf.estimator.Estimator(
      model_fn=replicate_model_fn.replicate_model_fn(model_fn, optimizer_fn))
    ```

  On `EstimatorSpec.train_op`:
  `model_fn` returns `EstimatorSpec.train_op` for
  `tf.estimator.GraphKeys.TRAIN`. It is typically derived using an optimizer.
  `replicate_model_fn` ignores the returned `EstimatorSpec.train_op`, so there
  is no need to use an optimizer inside the user's `model_fn`. The
  `EstimatorSpec.loss` subgraph is going to be executed, while
  `EstimatorSpec.train_op` isn't going to be executed. One could pass
  `train_op=tf.noop()` to `EstimatorSpec`.

  On sharding input features and labels:
  Input features and labels are split for consumption by each tower. They are
  split across the dimension 0. Features and labels need to be batch major.

  On reduction algorithms:
  Certain algorithms were chosen for aggregating results of computations on
  multiple towers:
    - Losses from all towers are reduced using sum.
    - Gradients are reduced using sum for each trainable variable.
    - `eval_metrics_ops` are reduced per metric using `reduce_mean`.
    - `EstimatorSpec.predictions` and `EstimatorSpec.export_outputs` are
      reduced using concatenation.
    - For all other fields of `EstimatorSpec` the values of the first tower
      are taken.

  On replication of variables:
  Variables are not duplicated between towers. Instead, they are placed on a
  single device as defined above and shared across towers.

  Other current limitations:
    - `predictions` are not supported for `ModeKeys.EVAL`. That is required for
      `tf.contrib.estimator.add_metrics`.

  Args:
    model_fn: `model_fn` as defined in `Estimator`. See the section above about
      the train_op argument of `EstimatorSpec`.
    optimizer_fn: a function that returns an optimizer instance. The function
      may accept one `params` argument. This is the `params` argument as
      defined by `Estimator`. See the `Estimator` documentation for details.
    devices: Optional list of devices to replicate the model across. This
      argument can be used to replicate only on the subset of available GPUs.
      If `None`, then all available GPUs are going to be used for replication.
      If no GPUs are available, then the model is going to be placed on the
      CPU.

  Returns:
    A replicated version of the supplied `model_fn`. Returned function that
    conforms to the requirements of `Estimator`'s `model_fn` and can be used
    instead of the supplied `model_fn`.
  """
  if not devices:
    devices = _get_local_devices('GPU') or _get_local_devices('CPU')
  # Single-GPU case keeps variables on that GPU; otherwise they live on CPU.
  is_a_single_gpu_case = len(devices) == 1 and 'GPU' in devices[0]
  local_ps_device = '/{}:0'.format('GPU' if is_a_single_gpu_case else 'CPU')
  tf_logging.info('Replicating the `model_fn` across {}. Local parameter '
                  'server device is going to be {}.'.format(
                      devices, local_ps_device))
  def replicated_model_fn(features, labels, mode, params=None, config=None):
    """Replicated version of `model_fn` to be used instead."""
    # Shard the inputs, then build one loss tower per device.
    feature_shards, label_shards = _split_batch(
        features, labels, len(devices), device=local_ps_device)
    tower_specs = _get_loss_towers(
        model_fn=model_fn,
        mode=mode,
        features=feature_shards,
        labels=label_shards,
        params=params,
        config=config,
        devices=devices,
        local_ps_device=local_ps_device)
    # Aggregate the per-tower specs into a single EstimatorSpec per mode.
    if mode == model_fn_lib.ModeKeys.TRAIN:
      train_op = _minimize_towers(tower_specs,
                                  _call_optimizer_fn(optimizer_fn, params))
      return _train_spec(
          tower_specs, train_op, aggregation_device=local_ps_device)
    elif mode == model_fn_lib.ModeKeys.EVAL:
      return _eval_spec(tower_specs, aggregation_device=local_ps_device)
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      return _predict_spec(tower_specs, aggregation_device=local_ps_device)
  return replicated_model_fn
def _get_local_devices(device_type):
  """Return the names of local devices of the given type (e.g. 'GPU')."""
  return [
      d.name for d in device_lib.list_local_devices()
      if d.device_type == device_type
  ]
def _split_batch(features, labels, number_of_shards, device):
  """Split input features and labels into `number_of_shards` batches."""
  def split_dictionary(dictionary):
    """Split a dictionary into shards."""
    shards = [{} for _ in range(number_of_shards)]
    for name, tensor in six.iteritems(dictionary):
      if isinstance(tensor, sparse_tensor.SparseTensor):
        # SparseTensors need a dedicated split op along dimension 0.
        for i, shard in enumerate(
            sparse_ops.sparse_split(
                sp_input=tensor, num_split=number_of_shards, axis=0)):
          shards[i][name] = shard
      else:
        for i, shard in enumerate(array_ops.split(tensor, number_of_shards)):
          shards[i][name] = shard
    return shards
  with ops_lib.name_scope('split_inputs'):
    with ops_lib.device(device):
      # Features and labels may each be a single tensor or a dict of tensors.
      if isinstance(features, dict):
        feature_shards = split_dictionary(features)
      else:
        feature_shards = array_ops.split(features, number_of_shards)
      if labels is None:
        label_shards = None
      elif isinstance(labels, dict):
        label_shards = split_dictionary(labels)
      else:
        label_shards = array_ops.split(labels, number_of_shards)
  return feature_shards, label_shards
# Name scope applied to every tower after the first, e.g. 'tower_1'.
_DEFAULT_NAME_SCOPE_PATTERN = 'tower_{}'
def _get_loss_towers(model_fn,
                     mode,
                     features,
                     labels,
                     params,
                     config,
                     devices,
                     local_ps_device,
                     name_scope_pattern=_DEFAULT_NAME_SCOPE_PATTERN):
  """Replicate the loss computation across devices."""
  tower_specs = []
  model_fn_args = util.fn_args(model_fn)
  optional_params = {}
  # Forward `params`/`config` only when the user's model_fn accepts them.
  if 'params' in model_fn_args:
    optional_params['params'] = copy.deepcopy(params)
  if 'config' in model_fn_args:
    optional_params['config'] = copy.deepcopy(config)
  for i, device in enumerate(devices):
    is_the_first_tower = (i == 0)
    device_setter = _local_device_setter(
        worker_device=device, ps_device=local_ps_device)
    # We would like to preserve the names of the variables and ops that a user
    # might be relying on. Names with prefix are going to resolve to variables
    # and ops of the first tower.
    name_scope = name_scope_pattern
    if is_the_first_tower:
      name_scope = ''
    # Variables are shared: every tower after the first reuses them.
    with variable_scope.variable_scope('', reuse=not is_the_first_tower):
      with ops_lib.name_scope(name_scope.format(i)):
        with ops_lib.device(device_setter):
          labels_shard = None
          if labels:
            labels_shard = labels[i]
          tower_specs.append(
              model_fn(
                  mode=mode,
                  features=features[i],
                  labels=labels_shard,
                  **optional_params))
  return tower_specs
def _local_device_setter(ps_device, worker_device):
  """A device setter that distributes Vars/Ops to the PS/worker devices."""
  # Op types that create variables; they belong on the parameter server.
  ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
  def local_device_chooser(op):
    # Device fields already set on the op take precedence via merge_from.
    current_device = framework_device.DeviceSpec.from_string(op.device or '')
    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
    if node_def.op in ps_ops:
      ps_device_spec = framework_device.DeviceSpec.from_string(
          '{}'.format(ps_device))
      ps_device_spec.merge_from(current_device)
      return ps_device_spec.to_string()
    else:
      worker_device_spec = framework_device.DeviceSpec.from_string(
          worker_device or '')
      worker_device_spec.merge_from(current_device)
      return worker_device_spec.to_string()
  return local_device_chooser
def _minimize_towers(tower_specs, optimizer):
  """Aggregate and apply gradients for computed losses."""
  # Map each trainable variable to the list of its per-tower gradients.
  grad_lists = {}
  for tower_spec in tower_specs:
    # Compute gradients on the device that produced each tower's loss.
    with ops_lib.device(tower_spec.loss.device):
      for grad, var in optimizer.compute_gradients(tower_spec.loss):
        if grad is not None:
          grad_lists.setdefault(var, []).append(grad)
  aggregated_grads = []
  with ops_lib.name_scope('gradient_aggregating'):
    # Sum each variable's gradients on the device that owns the variable.
    for var, grads in six.iteritems(grad_lists):
      grad = _compute_sum_on_device(grads, var.device)
      aggregated_grads.append((grad, var))
  train_op = optimizer.apply_gradients(
      aggregated_grads, global_step=training_util.get_global_step())
  return train_op
def _call_optimizer_fn(optimizer_fn, params):
  """Invoke `optimizer_fn`, forwarding `params` only when it is accepted."""
  accepts_params = 'params' in util.fn_args(optimizer_fn)
  kwargs = {'params': params} if accepts_params else {}
  return optimizer_fn(**kwargs)
def _compute_sum_on_device(values, device, name=None):
  """Sum `values` (dense tensors or IndexedSlices) on `device`.

  IndexedSlices are combined by concatenating their values and indices,
  which cannot carry a custom op `name`; passing one raises ValueError.
  """
  with ops_lib.device(device):
    if isinstance(values[0], ops_lib.IndexedSlices):
      if name:
        raise ValueError('The name {} is not expected to be given to '
                         'IndexedSlices {}'.format(name, values))
      values_concat = array_ops.concat([v.values for v in values], axis=0)
      indices_concat = array_ops.concat([v.indices for v in values], axis=0)
      return ops_lib.IndexedSlices(values_concat, indices_concat,
                                   values[0].dense_shape)
    else:
      return math_ops.add_n(values, name=name)
def _train_spec(tower_specs,
                train_op,
                aggregation_device,
                aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.TRAIN`."""
  # Start from the first tower's spec and overwrite the aggregated fields.
  spec_kwargs = tower_specs[0]._asdict()
  spec_kwargs['mode'] = model_fn_lib.ModeKeys.TRAIN
  spec_kwargs['train_op'] = train_op
  spec_kwargs['loss'] = _compute_sum_on_device(
      [s.loss for s in tower_specs], aggregation_device, aggregated_loss_name)
  return model_fn_lib.EstimatorSpec(**spec_kwargs)
def _eval_spec(tower_specs, aggregation_device, aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.EVAL`."""
  estimator_spec = tower_specs[0]._asdict()
  estimator_spec['mode'] = model_fn_lib.ModeKeys.EVAL
  estimator_spec['loss'] = _compute_sum_on_device(
      [spec.loss for spec in tower_specs], aggregation_device,
      aggregated_loss_name)
  update_ops = []
  for tower_spec in tower_specs:
    for name, (_, update_op) in six.iteritems(tower_spec.eval_metric_ops):
      update_ops.append(update_op)
  with ops_lib.control_dependencies(update_ops):
    # After all towers update their local metric variables, fold the
    # per-tower variables into the first tower's variables.
    reduced_update_op = _reduce_metric_variables(len(tower_specs))
  eval_metric_ops = {}
  for name, (metric_tensor, _) in six.iteritems(tower_specs[0].eval_metric_ops):
    # Every metric shares the single reduction op as its update op.
    eval_metric_ops[name] = (metric_tensor, reduced_update_op)
  estimator_spec['eval_metric_ops'] = eval_metric_ops
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _reduce_metric_variables(number_of_towers):
  """Aggregate local variables used in metrics into the first tower.

  Returns a single op that adds every replica's metric variables into the
  first tower's variables and zeroes the replicas, making the reduction
  idempotent.  Raises ValueError if the towers did not create identical
  sets of metric variables.
  """
  if number_of_towers == 1:
    return control_flow_ops.no_op()
  metric_variables = ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)
  variables_per_tower = len(metric_variables) // number_of_towers
  if len(metric_variables) % number_of_towers != 0:
    raise ValueError(
        'Different `EstimatorSpec.eval_metric_ops` across `model_fn()` calls.'
        ' Expected {} local variables, but got {} instead.'.format(
            variables_per_tower * number_of_towers, len(metric_variables)))
  # `metric_variables` has the size of `variables_per_tower` x
  #  number_of_towers.  Each tower is produced by calling the same model_fn.
  #  First `variables_per_tower` correspond to the first tower.  Each such
  #  variable has an replica at the `(variables_per_tower * i)` position, where
  #  `i` is `[1.. number_of_towers]`.  We are going to add values from replicas
  #  to each variable of the first tower.  We then zero out replica values, so
  #  that `_reduce_metric_variables` operation is idempotent.  If a metric
  #  is then computed based on local variables from the first tower, then the
  #  resulting metric is an estimate for all `number_of_towers` towers.
  ops = []
  for i in range(0, variables_per_tower):
    next_replica_id = i + variables_per_tower
    replicas = [
        metric_variables[replica_id]
        for replica_id in range(next_replica_id, len(metric_variables),
                                variables_per_tower)
    ]  # `replicas` doesn't contain the first-tower variable.
    reduce_op = state_ops.assign_add(metric_variables[i],
                                     math_ops.add_n(replicas))
    with ops_lib.control_dependencies([reduce_op]):
      for replica in replicas:
        zeros_for_replica = array_ops.zeros(
            array_ops.shape(replica), dtype=replica.dtype)
        zero_out_replica_op = state_ops.assign(replica, zeros_for_replica)
        ops.append(zero_out_replica_op)
  return control_flow_ops.group(*ops)
def _predict_spec(tower_specs, aggregation_device):
  """Populate replicated EstimatorSpec for `GraphKeys.PREDICT`."""
  estimator_spec = tower_specs[0]._asdict()
  estimator_spec['mode'] = model_fn_lib.ModeKeys.PREDICT
  with ops_lib.device(aggregation_device):
    # Concatenate per-tower prediction tensors along the batch dimension.
    estimator_spec['predictions'] = _concat_tensor_dicts(
        *[tower_spec.predictions for tower_spec in tower_specs])
    export_outputs_dict = _dict_concat(
        *[tower_spec.export_outputs for tower_spec in tower_specs])
    export_outputs = {}
    for name, export_output_list in six.iteritems(export_outputs_dict):
      # Rebuild each export output type from the concatenation of its
      # per-tower outputs.
      if isinstance(export_output_list[0], export_output_lib.PredictOutput):
        export_outputs[name] = export_output_lib.PredictOutput(
            outputs=_concat_tensor_dicts(*[
                export_output.outputs for export_output in export_output_list
            ]))
      elif isinstance(export_output_list[0],
                      export_output_lib.RegressionOutput):
        export_outputs[name] = export_output_lib.RegressionOutput(
            value=array_ops.concat(
                [export_output.value for export_output in export_output_list],
                axis=0))
      elif isinstance(export_output_list[0],
                      export_output_lib.ClassificationOutput):
        scores = None
        if export_output_list[0].scores is not None:
          scores = array_ops.concat(
              [export_output.scores for export_output in export_output_list],
              axis=0)
        classes = None
        if export_output_list[0].classes is not None:
          # NOTE(review): classes are combined with stack() while scores use
          # concat(); confirm the differing axis semantics are intentional.
          classes = array_ops.stack(
              [export_output.classes for export_output in export_output_list],
              axis=0)
        export_outputs[name] = export_output_lib.ClassificationOutput(
            scores=scores, classes=classes)
    estimator_spec['export_outputs'] = export_outputs
  return model_fn_lib.EstimatorSpec(**estimator_spec)
def _concat_tensor_dicts(*tensor_dicts):
  """Merge dicts of tensors, concatenating same-named tensors along axis 0."""
  merged = _dict_concat(*tensor_dicts)
  return {
      name: array_ops.concat(tensors, axis=0, name=name)
      for name, tensors in six.iteritems(merged)
  }
def _dict_concat(*dicts):
list_dict = {}
for d in dicts:
if d is None:
continue
for k, v in six.iteritems(d):
list_dict.setdefault(k, []).append(v)
return list_dict
| |
"""
====================
Liftover pipeline
====================
:Author: Andreas Heger
:Release: $Id: pipeline_liftover.py 2900 2010-04-13 14:38:00Z andreas $
:Date: |today|
:Tags: Python
The liftover pipeline maps a set of intervals from one or more genomes
to a target genome. It uses the :term:`liftover` tool from UCSC.
Overview
========
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general information on how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and :file:`sphinxreport.ini` file
(see :ref:`PipelineReporting`). To start with, use the files supplied with the
Example_ data.
Input
-----
Requirements
------------
Pipeline output
===============
Example
=======
Example data is available at http://www.cgat.org/~andreas/sample_data/pipeline_rnaseq.tgz.
To run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_rnaseq.tgz
tar -xvzf pipeline_rnaseq.tgz
cd pipeline_rnaseq
python <srcdir>/pipeline_rnaseq.py make full
.. note::
For the pipeline to run, install the :doc:`pipeline_annotations` as well.
Glossary
========
.. glossary::
liftover
UCSC tool to convert coordinates between assemblies
.. todo::
* make the merging step optional. Currently overlapping intervals are merged.
Code
====
"""
import sys
import glob
import os
import gzip
from ruffus import *
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineTracks as PipelineTracks
import CGAT.IOTools as IOTools
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
# Load pipeline options; later files in the list override earlier ones,
# so a local pipeline.ini takes precedence over the shipped defaults.
P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"])
# Global parameter dictionary shared by all tasks below.
PARAMS = P.PARAMS
###################################################################
###################################################################
###################################################################
##
###################################################################
# Optionally pull in extra configuration from a local pipeline_conf.py.
if os.path.exists("pipeline_conf.py"):
    # BUG FIX: the original called ``L.info(...)`` but ``L`` is never
    # imported in this module, so this branch raised NameError whenever a
    # pipeline_conf.py file was present. Report on stderr instead.
    sys.stderr.write("reading additional configuration from pipeline_conf.py\n")
    # Execute the extra configuration in this module's namespace ...
    exec(compile(open("pipeline_conf.py").read(), "pipeline_conf.py", 'exec'))
    # ... and re-read parameters so values it defines take effect.
    PARAMS = P.getParameters()
###################################################################
###################################################################
# Helper functions mapping tracks to conditions, etc
###################################################################
# Tracks are all *.gtf.gz files in the working directory, excluding
# pipeline outputs (*.mapped.gtf.gz).
# FIX: the pattern is now a raw string. "(\S+).gtf.gz" previously relied
# on the invalid string escape \S (a DeprecationWarning/SyntaxWarning on
# modern Python); the raw string has the identical runtime value.
TRACKS = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory(
    glob.glob("*.gtf.gz"), r"(\S+).gtf.gz", exclude=(".mapped.gtf.gz", ))
#####################################################################
#####################################################################
#####################################################################
@transform(TRACKS.getTracks("%s.gtf.gz"),
           suffix(".gtf.gz"),
           '.psl.gz')
def convertGtf2Psl(infile, outfile):
    """convert a gtf to a psl file.

    This method only takes features of type 'exon' and
    skips all contigs that are not in the genome sequence
    (for example the variant human chromosomes).

    :param infile: input ``<track>.gtf.gz`` file.
    :param outfile: output ``<track>.psl.gz`` file.
    :raises IOError: if the per-track genome fasta file is missing.
    """
    # The track name keys the per-track genome parameter in PARAMS.
    track = outfile[:-len(".psl.gz")]
    genomefile = os.path.join(
        PARAMS["genome_dir"], PARAMS["%s_genome" % track])
    # Fail early with a clear message rather than letting the shell
    # pipeline below fail obscurely.
    if not os.path.exists(genomefile + ".fasta"):
        raise IOError("genome %s does not exist" % genomefile)
    # NOTE: P.run() picks up ``statement`` from this function's locals and
    # interpolates %(infile)s / %(outfile)s / %(genomefile)s from them, so
    # none of these variable names may be renamed.
    statement = """gunzip
    < %(infile)s
    | awk '$3 == "exon"'
    | cgat gff2gff
    --method=sanitize
    --sanitize-method=genome
    --skip-missing
    --genome=%(genomefile)s
    --log=%(outfile)s.log
    | cgat gff2psl
    --allow-duplicates
    --is-gtf
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    """
    P.run()
###################################################################
@transform('*.bed.gz',
           suffix(".bed.gz"),
           '.psl.gz')
def convertBed2Psl(infile, outfile):
    """convert a bed to a psl file.

    :param infile: input ``<track>.bed.gz`` file.
    :param outfile: output ``<track>.psl.gz`` file.
    :raises IOError: if the per-track genome fasta file is missing.
    """
    # NOTE: outfile ends in ".psl.gz", not ".bed.gz"; this slice works
    # only because len(".bed.gz") == len(".psl.gz") == 7.
    track = outfile[:-len(".bed.gz")]
    genomefile = os.path.join(
        PARAMS["genome_dir"], PARAMS["%s_genome" % track])
    if not os.path.exists(genomefile + ".fasta"):
        raise IOError("genome %s does not exist" % genomefile)
    # NOTE: P.run() interpolates %(infile)s/%(genomefile)s/%(outfile)s
    # from this function's locals; do not rename these variables.
    statement = """gunzip < %(infile)s
    | cgat bed2psl
    --genome=%(genomefile)s
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    """
    P.run()
###################################################################
###################################################################
###################################################################
@transform((convertGtf2Psl, convertBed2Psl), suffix(".psl.gz"), '.transcripts')
def mergeTranscripts(infile, outfile):
    """merge transcripts before mapping.

    Overlapping transcripts are combined in order to
    speed up the mapping process.

    The awk filter strips the 5-line psLayout header; alignments are then
    sorted and farmed out per target contig (column 14) to
    ``cgat psl2assembly``, which writes its per-section outputs under
    ``<outfile>.<section>`` naming.
    """
    track = outfile[:-len(".transcripts")]
    genomefile = os.path.join(
        PARAMS["genome_dir"], PARAMS["%s_genome" % track])
    # NOTE: P.run() interpolates %(infile)s/%(genomefile)s/%(outfile)s and
    # %(cmd-farm)s from locals/PARAMS; do not rename these variables.
    statement = """
    gunzip < %(infile)s
    | awk '/psLayout/ { x = 4; next; } x > 0 { --x; next} { print; }'
    | sort -k 14,14 -k 16,16n
    | %(cmd-farm)s
    --split-at-column=14
    --output-header
    --renumber="%%06i"
    --renumber-column=":id"
    --log=%(outfile)s.log
    --subdirs \
    "cgat psl2assembly
    --staggered=all
    --method=region
    --method=transcript
    --threshold-merge-distance=0
    --threshold-merge-overlap=3
    --genome=%(genomefile)s
    --mali-output-format=fasta
    --log=%(outfile)s.log
    --output-filename-pattern=%%DIR%%%(outfile)s.%%s"
    > %(outfile)s"""
    P.run()
@transform(mergeTranscripts, suffix(".transcripts"), '.merged.mapped.psl')
def mapMergedTranscripts(infile, outfile):
    """map transcripts from PSL.

    Mapping from PSL is equivalent to first converting to genePred format
    and using the option -gp.

    :raises IOError: if the UCSC liftover chain file is missing.
    """
    track = outfile[:-len(".merged.mapped.psl")]
    # Chain file named <source>To<Target>.over.chain.gz inside the
    # per-genome liftOver directory of the UCSC download tree.
    chainfile = os.path.join(PARAMS["ucsc_dir"],
                             PARAMS["%s_genome" % track],
                             "liftOver",
                             "%sTo%s.over.chain.gz" %
                             (PARAMS["%s_genome" % track],
                              PARAMS["genome"].capitalize()))
    if not os.path.exists(chainfile):
        raise IOError("chain file %s does not exist" % chainfile)
    # NOTE: P.run() interpolates %(infile)s/%(chainfile)s/%(outfile)s from
    # locals. The input is %(infile)s.transcripts.psl — presumably the psl
    # section written by psl2assembly in mergeTranscripts; the summary()
    # task below expects the same double-suffixed name. TODO confirm.
    statement = """
    liftOver -minMatch=0.2 -minBlocks=0.01 -pslT
    %(infile)s.transcripts.psl
    <(gunzip < %(chainfile)s)
    %(outfile)s
    %(outfile)s.unmapped
    >& %(outfile)s.log
    """
    P.run()
@transform((convertGtf2Psl, convertBed2Psl), suffix(".psl.gz"), '.mapped.psl')
def mapTranscripts(infile, outfile):
    """map transcripts from PSL.

    Mapping from PSL is equivalent to first converting to genePred format
    and using the option -gp.
    """
    track = outfile[:-len(".mapped.psl")]
    chainfile = os.path.join(PARAMS["ucsc_dir"],
                             PARAMS["%s_genome" % track],
                             "liftOver",
                             "%sTo%s.over.chain.gz" %
                             (PARAMS["%s_genome" % track],
                              PARAMS["genome"].capitalize()))
    # NOTE(review): unlike mapMergedTranscripts, this task does not check
    # that the chain file exists before running liftOver.
    # NOTE: P.run() interpolates %(infile)s/%(chainfile)s/%(outfile)s from
    # this function's locals; do not rename these variables.
    statement = """
    liftOver -minMatch=0.2 -minBlocks=0.01 -pslT
    <(gunzip < %(infile)s )
    <(gunzip < %(chainfile)s)
    %(outfile)s
    %(outfile)s.unmapped
    >& %(outfile)s.log
    """
    P.run()
@transform((mapMergedTranscripts, mapTranscripts), suffix(".psl"), '.gtf.gz')
def convertMappedPslToGtf(infile, outfile):
    '''convert to gtf for export.'''
    # NOTE: P.run() interpolates %(infile)s/%(outfile)s from this
    # function's locals; do not rename these variables.
    statement = """
    cgat psl2gff --as-gtf
    < %(infile)s
    | gzip
    > %(outfile)s
    """
    P.run()
@transform(convertMappedPslToGtf, suffix(".gtf.gz"), '.summary')
def summary(infile, outfile):
    '''compute mapping stats.

    Counts alignments in the input, merged and mapped psl files for a
    track and writes a one-line tab-separated summary with counts and
    percentages to ``outfile``.
    '''

    def _getfiles(filename):
        """Derive (track, input, merged, mapped) filenames for `filename`.

        BUG FIX: the original ignored its ``filename`` argument and closed
        over ``outfile`` instead; it now uses the argument it is given.
        """
        track = filename[:-len(".mapped.summary")]
        if track.endswith(".merged"):
            xtrack = track[:-len(".merged")]
            finput = "%s.psl.gz" % xtrack
            fmerged = "%s.transcripts.transcripts.psl" % xtrack
            fmapped = "%s.mapped.psl" % track
        else:
            finput = "%s.psl.gz" % track
            fmerged = finput
            fmapped = "%s.mapped.psl" % track
        return track, finput, fmerged, fmapped

    def countPSL(filename):
        """Count non-comment lines in a (possibly gzipped) psl file."""
        if filename.endswith(".gz"):
            inf = gzip.open(filename)
        else:
            inf = open(filename)
        # BUG FIX: close the file handle (previously leaked).
        try:
            ll = [x[:10] for x in inf.readlines() if not x.startswith("#")]
        finally:
            inf.close()
        # ROBUSTNESS: an empty file previously raised IndexError.
        if not ll:
            return 0
        if ll[0].startswith("psLayout"):
            # skip the 5-line psLayout header
            return len(ll) - 5
        else:
            return len(ll)

    track, finput, fmerged, fmapped = _getfiles(outfile)
    ninput = countPSL(finput)
    # subtract header
    nmerged = countPSL(fmerged) - 5
    nmapped = countPSL(fmapped)

    # BUG FIX: use a context manager so the summary file is flushed and
    # closed (it was previously left open).
    with open(outfile, "w") as outf:
        outf.write("track\tinput\tmerged\tpmerged\tmapped\tpmapped\tpoutput\n")
        outf.write("%s\t%i\t%i\t%s\t%i\t%s\t%s\n" %
                   (track,
                    ninput,
                    nmerged,
                    IOTools.prettyPercent(nmerged, ninput),
                    nmapped,
                    IOTools.prettyPercent(nmapped, nmerged),
                    IOTools.prettyPercent(nmapped, ninput)))
@follows(convertMappedPslToGtf, summary)
def full():
    # Meta-target: running "make full" builds all mapped gtfs + summaries.
    pass
if __name__ == "__main__":
    # Delegate to the generic CGAT pipeline command-line interface.
    sys.exit(P.main(sys.argv))
| |
"""Tests for the flux_led integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import contextmanager
import datetime
from unittest.mock import AsyncMock, MagicMock, patch
from flux_led import DeviceType
from flux_led.aio import AIOWifiLedBulb
from flux_led.const import (
COLOR_MODE_CCT as FLUX_COLOR_MODE_CCT,
COLOR_MODE_RGB as FLUX_COLOR_MODE_RGB,
WhiteChannelType,
)
from flux_led.models_db import MODEL_MAP
from flux_led.protocol import (
LEDENETRawState,
PowerRestoreState,
PowerRestoreStates,
RemoteConfig,
)
from flux_led.scanner import FluxLEDDiscovery
from homeassistant.components import dhcp
from homeassistant.components.flux_led.const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
# Module paths patched throughout the flux_led tests.
MODULE = "homeassistant.components.flux_led"
MODULE_CONFIG_FLOW = "homeassistant.components.flux_led.config_flow"

# Canonical fake device identity shared by the fixtures below.
IP_ADDRESS = "127.0.0.1"
MODEL_NUM_HEX = "0x35"  # string form of MODEL_NUM
MODEL_NUM = 0x35
MODEL = "AK001-ZJ2149"
MODEL_DESCRIPTION = "Bulb RGBCW"
MAC_ADDRESS = "aa:bb:cc:dd:ee:ff"
# flux_led discovery reports MACs uppercased without separators.
FLUX_MAC_ADDRESS = "AABBCCDDEEFF"
# Last three octets — used in the default config entry title.
SHORT_MAC_ADDRESS = "DDEEFF"
DEFAULT_ENTRY_TITLE = f"{MODEL_DESCRIPTION} {SHORT_MAC_ADDRESS}"

# DHCP discovery payload matching the device above.
DHCP_DISCOVERY = dhcp.DhcpServiceInfo(
    hostname=MODEL,
    ip=IP_ADDRESS,
    macaddress=MAC_ADDRESS,
)
# Discovery result with the optional protocol fields missing
# (device answered the scan but the detail probe did not complete).
FLUX_DISCOVERY_PARTIAL = FluxLEDDiscovery(
    ipaddr=IP_ADDRESS,
    model=MODEL,
    id=FLUX_MAC_ADDRESS,
    model_num=None,
    version_num=None,
    firmware_date=None,
    model_info=None,
    model_description=None,
)
# Fully populated discovery result, including remote-access details.
FLUX_DISCOVERY = FluxLEDDiscovery(
    ipaddr=IP_ADDRESS,
    model=MODEL,
    id=FLUX_MAC_ADDRESS,
    model_num=MODEL_NUM,
    version_num=0x04,
    firmware_date=datetime.date(2021, 5, 5),
    model_info=MODEL,
    model_description=MODEL_DESCRIPTION,
    remote_access_enabled=True,
    remote_access_host="the.cloud",
    remote_access_port=8816,
)
def _mock_config_entry_for_bulb(hass: HomeAssistant) -> ConfigEntry:
    """Create and register a mock config entry for the test bulb."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},
        unique_id=MAC_ADDRESS,
    )
    entry.add_to_hass(hass)
    return entry
def _mocked_bulb() -> AIOWifiLedBulb:
    """Build a MagicMock standing in for an RGBCW AIOWifiLedBulb."""
    # NOTE(review): ``auto_spec`` is not a MagicMock keyword (the real ones
    # are ``spec``/``create_autospec``); as written it merely attaches an
    # ``auto_spec`` attribute to the mock — confirm intent.
    bulb = MagicMock(auto_spec=AIOWifiLedBulb)

    # async_setup stores the push-update callback so tests can fire it
    # later via bulb.data_receive_callback().
    async def _save_setup_callback(callback: Callable) -> None:
        bulb.data_receive_callback = callback

    bulb.device_type = DeviceType.Bulb
    bulb.requires_turn_on = True
    bulb.async_setup = AsyncMock(side_effect=_save_setup_callback)
    bulb.effect_list = ["some_effect"]
    bulb.remote_config = RemoteConfig.OPEN
    # Command coroutines: AsyncMocks so tests can assert calls offline.
    bulb.async_unpair_remotes = AsyncMock()
    bulb.async_set_time = AsyncMock()
    bulb.async_set_music_mode = AsyncMock()
    bulb.async_set_custom_pattern = AsyncMock()
    bulb.async_set_preset_pattern = AsyncMock()
    bulb.async_set_effect = AsyncMock()
    bulb.async_set_white_temp = AsyncMock()
    bulb.async_set_brightness = AsyncMock()
    bulb.async_set_device_config = AsyncMock()
    bulb.async_config_remotes = AsyncMock()
    # Device configuration attributes reported by the library.
    bulb.white_channel_channel_type = WhiteChannelType.WARM
    bulb.paired_remotes = 2
    bulb.pixels_per_segment = 300
    bulb.segments = 2
    bulb.diagnostics = {"mock_diag": "mock_diag"}
    bulb.music_pixels_per_segment = 150
    bulb.music_segments = 4
    bulb.operating_mode = "RGB&W"
    bulb.operating_modes = ["RGB&W", "RGB/W"]
    bulb.wirings = ["RGBW", "GRBW", "BGRW"]
    bulb.wiring = "BGRW"
    bulb.ic_types = ["WS2812B", "UCS1618"]
    bulb.ic_type = "WS2812B"
    bulb.async_stop = AsyncMock()
    bulb.async_update = AsyncMock()
    bulb.async_turn_off = AsyncMock()
    bulb.async_turn_on = AsyncMock()
    bulb.async_set_levels = AsyncMock()
    bulb.async_set_zones = AsyncMock()
    bulb.async_disable_remote_access = AsyncMock()
    bulb.async_enable_remote_access = AsyncMock()
    # Color/temperature state: red, half-brightness warm white channels.
    bulb.min_temp = 2700
    bulb.max_temp = 6500
    bulb.getRgb = MagicMock(return_value=[255, 0, 0])
    bulb.getRgbw = MagicMock(return_value=[255, 0, 0, 50])
    bulb.getRgbww = MagicMock(return_value=[255, 0, 0, 50, 0])
    bulb.getRgbcw = MagicMock(return_value=[255, 0, 0, 0, 50])
    bulb.rgb = (255, 0, 0)
    bulb.rgb_unscaled = (255, 0, 0)
    bulb.rgbw = (255, 0, 0, 50)
    bulb.rgbww = (255, 0, 0, 50, 0)
    bulb.rgbcw = (255, 0, 0, 0, 50)
    bulb.color_temp = 2700
    bulb.getWhiteTemperature = MagicMock(return_value=(2700, 128))
    bulb.brightness = 128
    bulb.model_num = MODEL_NUM
    bulb.model_data = MODEL_MAP[MODEL_NUM]
    bulb.effect = None
    bulb.speed = 50
    bulb.model = "Bulb RGBCW (0x35)"
    bulb.version_num = 8
    bulb.speed_adjust_off = True
    bulb.rgbwcapable = True
    bulb.color_modes = {FLUX_COLOR_MODE_RGB, FLUX_COLOR_MODE_CCT}
    bulb.color_mode = FLUX_COLOR_MODE_RGB
    # Raw protocol state matching the attributes above.
    bulb.raw_state = LEDENETRawState(
        0, 0x35, 0, 0x61, 0x5, 50, 255, 0, 0, 50, 8, 0, 0, 0
    )
    return bulb
def _mocked_switch() -> AIOWifiLedBulb:
    """Build a MagicMock standing in for a flux_led smart switch (0x97)."""
    # NOTE(review): ``auto_spec`` is not a MagicMock keyword; see
    # _mocked_bulb for the same caveat.
    switch = MagicMock(auto_spec=AIOWifiLedBulb)

    # async_setup stores the push-update callback so tests can fire it
    # later via switch.data_receive_callback().
    async def _save_setup_callback(callback: Callable) -> None:
        switch.data_receive_callback = callback

    switch.device_type = DeviceType.Switch
    # All four channels restore their previous state after a power cut.
    switch.power_restore_states = PowerRestoreStates(
        channel1=PowerRestoreState.LAST_STATE,
        channel2=PowerRestoreState.LAST_STATE,
        channel3=PowerRestoreState.LAST_STATE,
        channel4=PowerRestoreState.LAST_STATE,
    )
    # Light-only attributes are None for a switch.
    switch.pixels_per_segment = None
    switch.segments = None
    switch.music_pixels_per_segment = None
    switch.music_segments = None
    switch.operating_mode = None
    switch.operating_modes = None
    switch.wirings = None
    switch.wiring = None
    switch.ic_types = None
    switch.ic_type = None
    switch.requires_turn_on = True
    switch.async_set_time = AsyncMock()
    switch.async_reboot = AsyncMock()
    switch.async_setup = AsyncMock(side_effect=_save_setup_callback)
    switch.async_set_power_restore = AsyncMock()
    switch.async_stop = AsyncMock()
    switch.async_update = AsyncMock()
    switch.async_turn_off = AsyncMock()
    switch.async_turn_on = AsyncMock()
    switch.model_num = 0x97
    switch.model_data = MODEL_MAP[0x97]
    switch.model = "Switch (0x97)"
    switch.version_num = 0x97
    switch.raw_state = LEDENETRawState(
        0, 0x97, 0, 0x61, 0x97, 50, 255, 0, 0, 50, 8, 0, 0, 0
    )
    return switch
async def async_mock_device_turn_off(hass: HomeAssistant, bulb: AIOWifiLedBulb) -> None:
    """Mock the device being off.

    Flips the mocked bulb's state, fires its push-update callback and
    waits for Home Assistant to process the update.
    """
    bulb.is_on = False
    # BUG FIX: namedtuple._replace returns a NEW tuple; the original call
    # discarded the result, leaving raw_state.power_state unchanged.
    bulb.raw_state = bulb.raw_state._replace(power_state=0x24)
    bulb.data_receive_callback()
    await hass.async_block_till_done()
async def async_mock_device_turn_on(hass: HomeAssistant, bulb: AIOWifiLedBulb) -> None:
    """Mock the device being on.

    Flips the mocked bulb's state, fires its push-update callback and
    waits for Home Assistant to process the update.
    """
    bulb.is_on = True
    # BUG FIX: namedtuple._replace returns a NEW tuple; the original call
    # discarded the result, leaving raw_state.power_state unchanged.
    bulb.raw_state = bulb.raw_state._replace(power_state=0x23)
    bulb.data_receive_callback()
    await hass.async_block_till_done()
async def async_mock_effect_speed(
    hass: HomeAssistant, bulb: AIOWifiLedBulb, effect: str, speed: int
) -> None:
    """Mock the device being on with an effect."""
    bulb.effect = effect
    bulb.speed = speed
    bulb.data_receive_callback()
    await hass.async_block_till_done()
def _patch_discovery(device=None, no_device=False):
    """Patch flux_led discovery to yield `device` (or fail).

    Returns a context manager patching both the scanner's async scan and
    getBulbInfo. With ``no_device=True`` the async scan raises OSError
    and getBulbInfo returns an empty list.
    """

    async def _discovery(*args, **kwargs):
        if no_device:
            raise OSError
        # CLEANUP: the original returned `[] if no_device else [...]` here;
        # the `no_device` branch was unreachable dead code after the raise.
        return [device or FLUX_DISCOVERY]

    @contextmanager
    def _patcher():
        with patch(
            "homeassistant.components.flux_led.discovery.AIOBulbScanner.async_scan",
            new=_discovery,
        ), patch(
            "homeassistant.components.flux_led.discovery.AIOBulbScanner.getBulbInfo",
            return_value=[] if no_device else [device or FLUX_DISCOVERY],
        ):
            yield

    return _patcher()
def _patch_wifibulb(device=None, no_device=False):
    """Patch AIOWifiLedBulb construction to return a mocked bulb."""

    def _factory(*args, **kwargs):
        if no_device:
            # Simulate an unreachable device: setup times out.
            failing = _mocked_bulb()
            failing.async_setup = AsyncMock(side_effect=asyncio.TimeoutError)
            return failing
        return device if device else _mocked_bulb()

    return patch("homeassistant.components.flux_led.AIOWifiLedBulb", new=_factory)
| |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
""" A few useful function/method decorators. """
from __future__ import print_function
__docformat__ = "restructuredtext en"
import sys
import types
from time import clock, time
from inspect import isgeneratorfunction, getargspec
from logilab.common.compat import method_type
# XXX rewrite so we can use the decorator syntax when keyarg has to be specified
class cached_decorator(object):
    """Parametrized form of the ``cached`` decorator.

    Selects a cache implementation for the wrapped method:

    :param cacheattr: name of the instance attribute holding the cache
        (defaults to ``_<funcname>_cache_`` in the implementations).
    :param keyarg: 1-based index of the positional argument to use as the
        cache key; 0 forces the single-value cache.
    """
    def __init__(self, cacheattr=None, keyarg=None):
        self.cacheattr = cacheattr
        self.keyarg = keyarg

    def __call__(self, callableobj=None):
        # Generators cannot be cached: their "result" is a one-shot iterator.
        assert not isgeneratorfunction(callableobj), \
            'cannot cache generator function: %s' % callableobj
        # Only `self` in the signature (or keyarg == 0): one value per
        # instance. NOTE(review): getargspec was removed in Python 3.11;
        # this module is legacy py2-era code.
        if len(getargspec(callableobj).args) == 1 or self.keyarg == 0:
            cache = _SingleValueCache(callableobj, self.cacheattr)
        elif self.keyarg:
            # one value per distinct value of the chosen argument
            cache = _MultiValuesKeyArgCache(callableobj, self.keyarg, self.cacheattr)
        else:
            # one value per tuple of positional arguments
            cache = _MultiValuesCache(callableobj, self.cacheattr)
        return cache.closure()
class _SingleValueCache(object):
    """Cache implementation storing a single value per instance.

    The first parameter of ``__call__`` is deliberately named ``__me``
    (not ``self``): the *second* parameter receives the instance the
    decorated method is bound to, so the cached value can be stored on
    that instance under ``cacheattr``.
    """
    def __init__(self, callableobj, cacheattr=None):
        self.callable = callableobj
        if cacheattr is None:
            self.cacheattr = '_%s_cache_' % callableobj.__name__
        else:
            # refuse a cache attribute that would shadow the method itself
            assert cacheattr != callableobj.__name__
            self.cacheattr = cacheattr

    def __call__(__me, self, *args):
        try:
            # fast path: value already computed for this instance
            return self.__dict__[__me.cacheattr]
        except KeyError:
            value = __me.callable(self, *args)
            setattr(self, __me.cacheattr, value)
            return value

    def closure(self):
        # Expose the cache through a plain function so it can live in the
        # class dict and bind like a regular method.
        def wrapped(*args, **kwargs):
            return self.__call__(*args, **kwargs)
        wrapped.cache_obj = self  # hook used by get_cache_impl/clear_cache
        try:
            wrapped.__doc__ = self.callable.__doc__
            wrapped.__name__ = self.callable.__name__
        except:
            # best effort: some callables have read-only metadata
            pass
        return wrapped

    def clear(self, holder):
        # Drop the value cached on `holder`, if any.
        holder.__dict__.pop(self.cacheattr, None)
class _MultiValuesCache(_SingleValueCache):
    """Cache one value per tuple of positional arguments."""

    def _get_cache(self, holder):
        # Lazily create the per-instance {args: value} dict.
        try:
            _cache = holder.__dict__[self.cacheattr]
        except KeyError:
            _cache = {}
            setattr(holder, self.cacheattr, _cache)
        return _cache

    def __call__(__me, self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but neither forwarded to the
        # wrapped callable nor part of the cache key — keyword arguments
        # are silently dropped here (unlike _MultiValuesKeyArgCache).
        _cache = __me._get_cache(self)
        try:
            return _cache[args]
        except KeyError:
            _cache[args] = __me.callable(self, *args)
            return _cache[args]
class _MultiValuesKeyArgCache(_MultiValuesCache):
    """Cache one value per distinct value of a chosen positional argument.

    ``keyarg`` is 1-based relative to the method signature (argument 0
    being ``self``), hence the ``keyarg - 1`` index into ``args``.
    """
    def __init__(self, callableobj, keyarg, cacheattr=None):
        super(_MultiValuesKeyArgCache, self).__init__(callableobj, cacheattr)
        self.keyarg = keyarg

    def __call__(__me, self, *args, **kwargs):
        _cache = __me._get_cache(self)
        key = args[__me.keyarg-1]
        try:
            return _cache[key]
        except KeyError:
            # keyword arguments ARE forwarded here (cf. _MultiValuesCache)
            _cache[key] = __me.callable(self, *args, **kwargs)
            return _cache[key]
def cached(callableobj=None, keyarg=None, **kwargs):
    """Simple decorator to cache result of method call.

    Usable both bare (``@cached``) and parametrized
    (``@cached(keyarg=1)``).
    """
    kwargs['keyarg'] = keyarg
    decorator = cached_decorator(**kwargs)
    return decorator if callableobj is None else decorator(callableobj)
class cachedproperty(object):
    """Non-data descriptor caching the wrapped callable's result.

    Equivalent to stacking @cached and @property, but more efficient:
    after the first access the computed value is stored directly in the
    instance ``__dict__`` under the property name, shadowing this
    descriptor. ``del obj.<property_name>`` empties the cache.

    Idea taken from the pyramid_ framework and the mercurial_ project.

    .. _pyramid: http://pypi.python.org/pypi/pyramid
    .. _mercurial: http://pypi.python.org/pypi/Mercurial
    """
    __slots__ = ('wrapped',)

    def __init__(self, wrapped):
        # Reject callables without __name__: we need it as the cache key.
        if not hasattr(wrapped, '__name__'):
            raise TypeError('%s must have a __name__ attribute' %
                            wrapped)
        self.wrapped = wrapped

    @property
    def __doc__(self):
        doc = getattr(self.wrapped, '__doc__', None)
        suffix = '\n%s' % doc if doc else ''
        return '<wrapped by the cachedproperty decorator>%s' % suffix

    def __get__(self, inst, objtype=None):
        if inst is None:
            # class-level access returns the descriptor itself
            return self
        value = self.wrapped(inst)
        setattr(inst, self.wrapped.__name__, value)
        return value
def get_cache_impl(obj, funcname):
    """Return the cache object backing the @cached method `funcname` of obj."""
    member = getattr(obj.__class__, funcname)
    # @cached may be stacked under @property; unwrap to the getter.
    if isinstance(member, property):
        member = member.fget
    return member.cache_obj
def clear_cache(obj, funcname):
    """Clear a cache handled by the :func:`cached` decorator. If 'x' class has
    @cached on its method `foo`, type

    >>> clear_cache(x, 'foo')

    to purge this method's cache on the instance.
    """
    cache = get_cache_impl(obj, funcname)
    cache.clear(obj)
def copy_cache(obj, funcname, cacheobj):
    """Copy cache for <funcname> from cacheobj to obj.

    Does nothing when cacheobj holds no cache for that method.
    """
    cacheattr = get_cache_impl(obj, funcname).cacheattr
    if cacheattr in cacheobj.__dict__:
        setattr(obj, cacheattr, cacheobj.__dict__[cacheattr])
class wproperty(object):
    """Simple descriptor expecting to take a modifier function as first argument
    and looking for a _<function name> to retrieve the attribute.

    Writes go through the decorated setter; reads simply return the
    ``_<name>`` attribute the setter is expected to maintain.
    """

    def __init__(self, setfunc):
        self.setfunc = setfunc
        self.attrname = '_' + setfunc.__name__

    def __set__(self, obj, value):
        self.setfunc(obj, value)

    def __get__(self, obj, cls):
        # class-level access is not supported
        assert obj is not None
        return getattr(obj, self.attrname)
class classproperty(object):
    """this is a simple property-like class but for class attributes.

    The getter always receives the class (never the instance), so the
    value may be read from either the class or its instances.
    """

    def __init__(self, get):
        self.get = get

    def __get__(self, inst, cls):
        return self.get(cls)
class iclassmethod(object):
    '''Descriptor for method which should be available as class method if called
    on the class or instance method if called on an instance.
    '''
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, objtype):
        # Bind to the class itself when accessed on the class, otherwise
        # bind to the instance like a normal method. `method_type` is the
        # py2/py3 shim from logilab.common.compat — presumably matching
        # the Python 2 types.MethodType(func, obj, cls) signature; the
        # third argument is ignored on Python 3. TODO confirm.
        if instance is None:
            return method_type(self.func, objtype, objtype.__class__)
        return method_type(self.func, instance, objtype)

    def __set__(self, instance, value):
        # Data-descriptor guard: forbid rebinding through an instance.
        raise AttributeError("can't set attribute")
def timed(f):
    """Decorator printing CPU ('clock') and wall ('time') time per call.

    NOTE(review): relies on ``time.clock`` (imported at module level),
    which was removed in Python 3.8 — this module is Python 2 era code.
    Also does not use functools.wraps, so the wrapper hides f's metadata.
    """
    def wrap(*args, **kwargs):
        t = time()
        c = clock()
        res = f(*args, **kwargs)
        print('%s clock: %.9f / time: %.9f' % (f.__name__,
                                               clock() - c, time() - t))
        return res
    return wrap
def locked(acquire, release):
    """Build a decorator bracketing each call with acquire/release.

    ``acquire(self)`` runs before the wrapped method; ``release(self)``
    always runs afterwards, even when the method raises.
    """
    def _decorate(func):
        def _locked_call(self, *args, **kwargs):
            acquire(self)
            try:
                return func(self, *args, **kwargs)
            finally:
                release(self)
        return _locked_call
    return _decorate
def monkeypatch(klass, methodname=None):
    """Decorator extending class with the decorated callable. This is basically
    a syntactic sugar vs class assignment.

    >>> class A:
    ...     pass
    >>> @monkeypatch(A)
    ... def meth(self):
    ...     return 12
    ...
    >>> a = A()
    >>> a.meth()
    12
    >>> @monkeypatch(A, 'foo')
    ... def meth(self):
    ...     return 12
    ...
    >>> a.foo()
    12
    """
    def _attach(func):
        # Fall back to the callable's own name unless one was supplied.
        try:
            name = methodname or func.__name__
        except AttributeError:
            raise AttributeError('%s has no __name__ attribute: '
                                 'you should provide an explicit `methodname`'
                                 % func)
        setattr(klass, name, func)
        return func
    return _attach
| |
"""
Copyright 2015 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import elasticsearch
import logging
import os
from freezer_api.common import _i18n
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api.common import utils
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class TypeManager(object):
    """Base CRUD wrapper for one elasticsearch document type.

    Every operation is scoped to a ``user_id`` so callers can only see
    their own documents.

    NOTE(review): the "filtered" query DSL built here is elasticsearch
    1.x/2.x syntax — confirm against the deployed ES version.
    """

    def __init__(self, es, doc_type, index):
        self.es = es          # elasticsearch client instance
        self.index = index    # target index name, e.g. 'freezer'
        self.doc_type = doc_type

    @staticmethod
    def get_base_search_filter(user_id, search=None):
        """Build filter clauses common to every query.

        Restricts results to `user_id` and applies any match / match_not
        clauses found in the optional `search` dict.
        """
        search = search or {}
        user_id_filter = {"term": {"user_id": user_id}}
        base_filter = [user_id_filter]
        match_list = [{"match": m} for m in search.get('match', [])]
        match_not_list = [{"match": m} for m in search.get('match_not', [])]
        base_filter.append({"query": {"bool": {"must": match_list,
                                               "must_not": match_not_list}}})
        return base_filter

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        """Build the full query body.

        Subclasses override this to add a doc-id term filter; `doc_id`
        is unused in this base implementation.
        """
        search = search or {}
        try:
            base_filter = TypeManager.get_base_search_filter(user_id, search)
            query_filter = {"filter": {"bool": {"must": base_filter}}}
            return {'query': {'filtered': query_filter}}
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('search operation failed: query not valid'))

    def get(self, user_id, doc_id):
        """Fetch one document by id, enforcing ownership.

        :raises DocumentNotFound: if the id does not exist.
        :raises AccessForbidden: if the document belongs to another user.
        :raises StorageEngineError: on any other backend failure.
        """
        try:
            res = self.es.get(index=self.index,
                              doc_type=self.doc_type,
                              id=doc_id)
            doc = res['_source']
        except elasticsearch.TransportError:
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('No document found with ID %s') % doc_id)
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Get operation failed: %s') % e)
        if doc['user_id'] != user_id:
            raise freezer_api_exc.AccessForbidden(
                _i18n._("Document access forbidden"))
        if '_version' in res:
            # surface the ES document version to the caller
            doc['_version'] = res['_version']
        return doc

    def search(self, user_id, doc_id=None, search=None, offset=0, limit=10):
        """Return up to `limit` matching documents starting at `offset`."""
        search = search or {}
        query_dsl = self.get_search_query(user_id, doc_id, search)
        try:
            res = self.es.search(index=self.index, doc_type=self.doc_type,
                                 size=limit, from_=offset, body=query_dsl)
        except elasticsearch.ConnectionError:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('unable to connect to db server'))
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('search operation failed: %s') % e)
        hit_list = res['hits']['hits']
        return [x['_source'] for x in hit_list]

    def insert(self, doc, doc_id=None):
        """Index `doc` and return ``(created, version)``.

        :raises DocumentExists: on a 409 version conflict.
        :raises StorageEngineError: on any other failure.
        """
        try:
            # remove _version from the document
            doc.pop('_version', None)
            res = self.es.index(index=self.index, doc_type=self.doc_type,
                                body=doc, id=doc_id)
            created = res['created']
            version = res['_version']
            # refresh so the new document is immediately searchable
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('index operation failed %s') % e)
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('index operation failed %s') % e)
        return (created, version)

    def delete(self, user_id, doc_id):
        """Delete every document matching (user_id, doc_id).

        Returns the id of the last deleted document, or None when the
        query matched nothing.
        """
        query_dsl = self.get_search_query(user_id, doc_id)
        try:
            results = self.es.search(index=self.index,
                                     doc_type=self.doc_type,
                                     body=query_dsl)
            results = results['hits']['hits']
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Scan operation failed: %s') % e)
        # NOTE(review): `id` shadows the builtin of the same name.
        id = None
        for res in results:
            id = res.get('_id')
            try:
                self.es.delete(index=self.index, doc_type=self.doc_type, id=id)
                self.es.indices.refresh(index=self.index)
            except Exception as e:
                raise freezer_api_exc.StorageEngineError(
                    message=_i18n._('Delete operation failed: %s') % e)
        return id
class BackupTypeManager(TypeManager):
    """Manager for backup documents (id field: ``backup_id``)."""

    def __init__(self, es, doc_type, index='freezer'):
        TypeManager.__init__(self, es, doc_type, index=index)

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        """Build the backup query, supporting an optional timestamp range."""
        search = search or {}
        filters = TypeManager.get_base_search_filter(user_id, search)
        if doc_id is not None:
            filters.append({"term": {"backup_id": doc_id}})
        # time_after / time_before narrow the timestamp range (inclusive).
        for field, op in (('time_after', 'gte'), ('time_before', 'lte')):
            if field in search:
                filters.append(
                    {"range": {"timestamp": {op: int(search[field])}}})
        return {'query': {'filtered': {
            "filter": {"bool": {"must": filters}}}}}
class ClientTypeManager(TypeManager):
    """Manager for client documents (id field: ``client.client_id``)."""

    def __init__(self, es, doc_type, index='freezer'):
        TypeManager.__init__(self, es, doc_type, index=index)

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        """Build the client query, narrowed to one client when doc_id given."""
        filters = TypeManager.get_base_search_filter(user_id, search or {})
        if doc_id is not None:
            filters.append({"term": {"client.client_id": doc_id}})
        return {'query': {'filtered': {
            "filter": {"bool": {"must": filters}}}}}
class JobTypeManager(TypeManager):
    """Manager for job documents (id field: ``job_id``)."""

    def __init__(self, es, doc_type, index='freezer'):
        TypeManager.__init__(self, es, doc_type, index=index)

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        # Base query narrowed to a single job when doc_id is given.
        search = search or {}
        base_filter = TypeManager.get_base_search_filter(user_id, search)
        if doc_id is not None:
            base_filter.append({"term": {"job_id": doc_id}})
        query_filter = {"filter": {"bool": {"must": base_filter}}}
        return {'query': {'filtered': query_filter}}

    def update(self, job_id, job_update_doc):
        """Partially update job `job_id`; return the new document version.

        :raises DocumentExists: on a 409 version conflict.
        :raises DocumentNotFound: for any other transport error.
        :raises StorageEngineError: on any other failure.
        """
        # remove _version from the document
        job_update_doc.pop('_version', 0)
        update_doc = {"doc": job_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=job_id, body=update_doc)
            version = res['_version']
            # refresh so the update is immediately searchable
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to find job to update '
                                'with id %(id)s. %(e)s') % {'id': job_id,
                                                            'e': e})
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Unable to update job with id %s') % job_id)
        return version
class ActionTypeManager(TypeManager):
    """Manager for action documents (id field: ``action_id``)."""

    def __init__(self, es, doc_type, index='freezer'):
        TypeManager.__init__(self, es, doc_type, index=index)

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        # Base query narrowed to a single action when doc_id is given.
        search = search or {}
        base_filter = TypeManager.get_base_search_filter(user_id, search)
        if doc_id is not None:
            base_filter.append({"term": {"action_id": doc_id}})
        query_filter = {"filter": {"bool": {"must": base_filter}}}
        return {'query': {'filtered': query_filter}}

    def update(self, action_id, action_update_doc):
        """Partially update action `action_id`; return the new version.

        :raises DocumentExists: on a 409 version conflict.
        :raises DocumentNotFound: for any other transport error.
        :raises StorageEngineError: on any other failure.
        """
        # remove _version from the document
        action_update_doc.pop('_version', 0)
        update_doc = {"doc": action_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=action_id, body=update_doc)
            version = res['_version']
            # refresh so the update is immediately searchable
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to find action to update '
                                'with id %s') % action_id)
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._(
                    'Unable to update action with id %s') % action_id)
        return version
class SessionTypeManager(TypeManager):
    """Manager for session documents (id field: ``session_id``)."""

    def __init__(self, es, doc_type, index='freezer'):
        TypeManager.__init__(self, es, doc_type, index=index)

    @staticmethod
    def get_search_query(user_id, doc_id, search=None):
        # Base query narrowed to a single session when doc_id is given.
        search = search or {}
        base_filter = TypeManager.get_base_search_filter(user_id, search)
        if doc_id is not None:
            base_filter.append({"term": {"session_id": doc_id}})
        query_filter = {"filter": {"bool": {"must": base_filter}}}
        return {'query': {'filtered': query_filter}}

    def update(self, session_id, session_update_doc):
        """Partially update session `session_id`; return the new version.

        :raises DocumentExists: on a 409 version conflict.
        :raises DocumentNotFound: for any other transport error.
        :raises StorageEngineError: on any other failure.
        """
        # remove _version from the document
        session_update_doc.pop('_version', 0)
        update_doc = {"doc": session_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=session_id, body=update_doc)
            version = res['_version']
            # refresh so the update is immediately searchable
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to update session '
                                '%(id)s %(e)s') % {'id': session_id, 'e': e}
            )
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._(
                    'Unable to update session with id %s') % session_id)
        return version
class ElasticSearchEngine(object):
    """Freezer metadata storage engine backed by Elasticsearch.

    Holds one TypeManager per document type (backups, clients, jobs,
    actions, sessions) and exposes the CRUD entry points used by the
    API layer.
    """

    _OPTS = [
        cfg.ListOpt('hosts',
                    default=['http://127.0.0.1:9200'],
                    help='specify the storage hosts'),
        cfg.StrOpt('index',
                   default='freezer',
                   help='specify the name of the elasticsearch index'),
        cfg.IntOpt('timeout',
                   default=60,
                   help='specify the connection timeout'),
        cfg.IntOpt('retries',
                   default=20,
                   help='number of retries to allow before raising and error'),
        cfg.BoolOpt('use_ssl',
                    default=False,
                    help='explicitly turn on SSL'),
        cfg.BoolOpt('verify_certs',
                    default=False,
                    help='turn on SSL certs verification'),
        cfg.StrOpt('ca_certs',
                   help='path to CA certs on disk'),
        cfg.IntOpt('number_of_replicas',
                   default=0,
                   help='Number of replicas for elk cluster. Default is 0. '
                        'Use 0 for no replicas. This should be set to (number '
                        'of node in the ES cluter -1).')
    ]

    def __init__(self, backend):
        """backend: name of the section in the config file to load
        elasticsearch opts
        """
        self.index = None
        self.es = None
        self.backup_manager = None
        self.client_manager = None
        self.job_manager = None
        self.action_manager = None
        self.session_manager = None
        # register elasticsearch opts
        CONF.register_opts(self._OPTS, group=backend)
        self.conf = dict(CONF.get(backend))
        self.backend = backend
        self._validate_opts()
        self.init(**self.conf)

    def _validate_opts(self):
        """Sanity-check the backend configuration.

        :raises ValueError: when neither 'hosts' nor 'endpoint' is set
        :raises Exception: when 'ca_certs' points to a missing file
        """
        # BUG FIX: the original condition was
        #     if not 'hosts' or 'endpoint' in self.conf.keys():
        # "not 'hosts'" is always False for a non-empty literal, so the
        # test degenerated to "'endpoint' in conf" -- it raised exactly
        # when an endpoint WAS configured and never when hosts were
        # missing. The intent is to require at least one of the two.
        if 'hosts' not in self.conf and 'endpoint' not in self.conf:
            raise ValueError("Couldn't find hosts in {0} section".format(
                self.backend)
            )
        if self.conf.get('ca_certs'):
            if not os.path.isfile(self.conf.get('ca_certs')):
                raise Exception("File not found: ca_certs file ({0}) not "
                                "found".format(self.conf.get('ca_certs')))

    def get_opts(self):
        """Return the oslo.config options this engine registers."""
        return self._OPTS

    def init(self, index='freezer', **kwargs):
        """Connect to Elasticsearch and build the per-type managers."""
        self.index = index
        self.es = elasticsearch.Elasticsearch(**kwargs)
        logging.info('Storage backend: Elasticsearch '
                     'at %s' % kwargs['hosts'])
        self.backup_manager = BackupTypeManager(self.es, 'backups')
        self.client_manager = ClientTypeManager(self.es, 'clients')
        self.job_manager = JobTypeManager(self.es, 'jobs')
        self.action_manager = ActionTypeManager(self.es, 'actions')
        self.session_manager = SessionTypeManager(self.es, 'sessions')

    # ------------------------------------------------------------- backups

    def get_backup(self, user_id, backup_id):
        """Return a single backup document owned by *user_id*."""
        return self.backup_manager.get(user_id, backup_id)

    def search_backup(self, user_id, offset=0, limit=10, search=None):
        """Search the user's backups; *search* is an optional filter dict."""
        search = search or {}
        return self.backup_manager.search(user_id,
                                          search=search,
                                          offset=offset,
                                          limit=limit)

    def add_backup(self, user_id, user_name, doc):
        """Validate and store a new backup document; return its id."""
        # raises if data is malformed (HTTP_400) or already present (HTTP_409)
        backup_metadata_doc = utils.BackupMetadataDoc(user_id, user_name, doc)
        if not backup_metadata_doc.is_valid():
            raise freezer_api_exc.BadDataFormat(
                message=_i18n._('Bad Data Format'))
        backup_id = backup_metadata_doc.backup_id
        self.backup_manager.insert(backup_metadata_doc.serialize(), backup_id)
        return backup_id

    def delete_backup(self, user_id, backup_id):
        """Delete one of the user's backup documents."""
        return self.backup_manager.delete(user_id, backup_id)

    # ------------------------------------------------------------- clients

    def get_client(self, user_id, client_id=None,
                   offset=0, limit=10, search=None):
        """Search the user's registered clients (optionally by id)."""
        search = search or {}
        return self.client_manager.search(user_id,
                                          client_id,
                                          search=search,
                                          offset=offset,
                                          limit=limit)

    def add_client(self, user_id, doc):
        """Register a new client; refuse duplicates of the same client_id."""
        client_doc = utils.ClientDoc.create(doc, user_id)
        client_id = client_doc['client']['client_id']
        existing = self.client_manager.search(user_id, client_id)
        if existing:
            raise freezer_api_exc.DocumentExists(
                message=_i18n._(
                    'Client already registered with ID %s') % client_id)
        self.client_manager.insert(client_doc)
        logging.info('Client registered, client_id: %s' % client_id)
        return client_id

    def delete_client(self, user_id, client_id):
        """Delete one of the user's registered clients."""
        return self.client_manager.delete(user_id, client_id)

    # ---------------------------------------------------------------- jobs

    def get_job(self, user_id, job_id):
        """Return a single job document owned by *user_id*."""
        return self.job_manager.get(user_id, job_id)

    def search_job(self, user_id, offset=0, limit=10, search=None):
        """Search the user's jobs; *search* is an optional filter dict."""
        search = search or {}
        return self.job_manager.search(user_id,
                                       search=search,
                                       offset=offset,
                                       limit=limit)

    def add_job(self, user_id, doc):
        """Validate and store a new job document; return its id."""
        jobdoc = utils.JobDoc.create(doc, user_id)
        job_id = jobdoc['job_id']
        self.job_manager.insert(jobdoc, job_id)
        logging.info('Job registered, job id: %s' % job_id)
        return job_id

    def delete_job(self, user_id, job_id):
        """Delete one of the user's jobs."""
        return self.job_manager.delete(user_id, job_id)

    def update_job(self, user_id, job_id, patch_doc):
        """Patch an existing job; return the new document version."""
        valid_patch = utils.JobDoc.create_patch(patch_doc)
        # ownership check: get() raises DocumentNotFound for other users'
        # documents before we touch the stored job
        assert (self.job_manager.get(user_id, job_id))
        version = self.job_manager.update(job_id, valid_patch)
        logging.info('Job %(id)s updated to version %(version)s' %
                     {'id': job_id, 'version': version})
        return version

    def replace_job(self, user_id, job_id, doc):
        """Create or fully replace a job document; return its version."""
        # check that no document exists with
        # same job_id and different user_id
        try:
            self.job_manager.get(user_id, job_id)
        except freezer_api_exc.DocumentNotFound:
            pass
        valid_doc = utils.JobDoc.update(doc, user_id, job_id)
        (created, version) = self.job_manager.insert(valid_doc, job_id)
        if created:
            logging.info('Job %s created' % job_id)
        else:
            logging.info(
                'Job %(id)s replaced with version %(version)s' %
                {'id': job_id, 'version': version})
        return version

    # ------------------------------------------------------------- actions

    def get_action(self, user_id, action_id):
        """Return a single action document owned by *user_id*."""
        return self.action_manager.get(user_id, action_id)

    def search_action(self, user_id, offset=0, limit=10, search=None):
        """Search the user's actions; *search* is an optional filter dict."""
        search = search or {}
        return self.action_manager.search(user_id,
                                          search=search,
                                          offset=offset,
                                          limit=limit)

    def add_action(self, user_id, doc):
        """Validate and store a new action document; return its id."""
        actiondoc = utils.ActionDoc.create(doc, user_id)
        action_id = actiondoc['action_id']
        self.action_manager.insert(actiondoc, action_id)
        logging.info('Action registered, action id: %s' % action_id)
        return action_id

    def delete_action(self, user_id, action_id):
        """Delete one of the user's actions."""
        return self.action_manager.delete(user_id, action_id)

    def update_action(self, user_id, action_id, patch_doc):
        """Patch an existing action; return the new document version."""
        valid_patch = utils.ActionDoc.create_patch(patch_doc)
        # ownership check, see update_job()
        assert (self.action_manager.get(user_id, action_id))
        version = self.action_manager.update(action_id, valid_patch)
        logging.info(
            'Action %(id)s updated to version %(version)s' %
            {'id': action_id, 'version': version})
        return version

    def replace_action(self, user_id, action_id, doc):
        """Create or fully replace an action document; return its version."""
        # check that no document exists with
        # same action_id and different user_id
        try:
            self.action_manager.get(user_id, action_id)
        except freezer_api_exc.DocumentNotFound:
            pass
        valid_doc = utils.ActionDoc.update(doc, user_id, action_id)
        (created, version) = self.action_manager.insert(valid_doc, action_id)
        if created:
            logging.info('Action %s created' % action_id)
        else:
            logging.info(
                'Action %(id)s replaced with version %(version)s'
                % {'id': action_id, 'version': version})
        return version

    # ------------------------------------------------------------ sessions

    def get_session(self, user_id, session_id):
        """Return a single session document owned by *user_id*."""
        return self.session_manager.get(user_id, session_id)

    def search_session(self, user_id, offset=0, limit=10, search=None):
        """Search the user's sessions; *search* is an optional filter dict."""
        search = search or {}
        return self.session_manager.search(user_id,
                                           search=search,
                                           offset=offset,
                                           limit=limit)

    def add_session(self, user_id, doc):
        """Validate and store a new session document; return its id."""
        session_doc = utils.SessionDoc.create(doc, user_id)
        session_id = session_doc['session_id']
        self.session_manager.insert(session_doc, session_id)
        logging.info(
            'Session registered, session id: %s' % session_id)
        return session_id

    def delete_session(self, user_id, session_id):
        """Delete one of the user's sessions."""
        return self.session_manager.delete(user_id, session_id)

    def update_session(self, user_id, session_id, patch_doc):
        """Patch an existing session; return the new document version."""
        valid_patch = utils.SessionDoc.create_patch(patch_doc)
        # ownership check, see update_job()
        assert (self.session_manager.get(user_id, session_id))
        version = self.session_manager.update(session_id, valid_patch)
        logging.info(
            'Session %(id)s updated to version %(version)s' %
            {'id': session_id, 'version': version})
        return version

    def replace_session(self, user_id, session_id, doc):
        """Create or fully replace a session document; return its version."""
        # check that no document exists with
        # same session_id and different user_id
        try:
            self.session_manager.get(user_id, session_id)
        except freezer_api_exc.DocumentNotFound:
            pass
        valid_doc = utils.SessionDoc.update(doc, user_id, session_id)
        (created, version) = self.session_manager.insert(valid_doc, session_id)
        if created:
            logging.info('Session %s created' % session_id)
        else:
            logging.info(
                'Session %(id)s replaced with version %(version)s'
                % {'id': session_id, 'version': version})
        return version
| |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.api.network import base_security_groups as base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class SecGroupTest(base.BaseSecGroupTest):
    """Smoke tests for the Neutron 'security-group' API extension.

    Exercises CRUD on security groups and security-group rules over the
    JSON interface; XML and IPv6 variants are provided by subclasses.
    """
    _interface = 'json'
    # CIDR used as remote_ip_prefix in rule tests; the IPv6 subclass
    # overrides this with the v6 tenant network CIDR.
    _tenant_network_cidr = CONF.network.tenant_network_cidr
    @classmethod
    def resource_setup(cls):
        """Skip the whole class when the extension is not deployed."""
        super(SecGroupTest, cls).resource_setup()
        if not test.is_extension_enabled('security-group', 'network'):
            msg = "security-group extension not enabled."
            raise cls.skipException(msg)
    def _create_verify_security_group_rule(self, sg_id, direction,
                                           ethertype, protocol,
                                           port_range_min,
                                           port_range_max,
                                           remote_group_id=None,
                                           remote_ip_prefix=None):
        # Create Security Group rule with the input params and validate
        # that SG rule is created with the same parameters.
        resp, rule_create_body = self.client.create_security_group_rule(
            security_group_id=sg_id,
            direction=direction,
            ethertype=ethertype,
            protocol=protocol,
            port_range_min=port_range_min,
            port_range_max=port_range_max,
            remote_group_id=remote_group_id,
            remote_ip_prefix=remote_ip_prefix
        )
        sec_group_rule = rule_create_body['security_group_rule']
        # register cleanup immediately so the rule is removed even if an
        # assertion below fails
        self.addCleanup(self._delete_security_group_rule,
                        sec_group_rule['id'])
        # every requested attribute must be echoed back unchanged
        expected = {'direction': direction, 'protocol': protocol,
                    'ethertype': ethertype, 'port_range_min': port_range_min,
                    'port_range_max': port_range_max,
                    'remote_group_id': remote_group_id,
                    'remote_ip_prefix': remote_ip_prefix}
        for key, value in six.iteritems(expected):
            self.assertEqual(value, sec_group_rule[key],
                             "Field %s of the created security group "
                             "rule does not match with %s." %
                             (key, value))
    @test.attr(type='smoke')
    def test_list_security_groups(self):
        # Verify the that security group belonging to tenant exist in list
        _, body = self.client.list_security_groups()
        security_groups = body['security_groups']
        found = None
        for n in security_groups:
            if (n['name'] == 'default'):
                found = n['id']
        msg = "Security-group list doesn't contain default security-group"
        self.assertIsNotNone(found, msg)
    @test.attr(type='smoke')
    def test_create_list_update_show_delete_security_group(self):
        """Full lifecycle of a security group: create/list/update/show."""
        group_create_body, name = self._create_security_group()
        # List security groups and verify if created group is there in response
        _, list_body = self.client.list_security_groups()
        secgroup_list = list()
        for secgroup in list_body['security_groups']:
            secgroup_list.append(secgroup['id'])
        self.assertIn(group_create_body['security_group']['id'], secgroup_list)
        # Update the security group
        new_name = data_utils.rand_name('security-')
        new_description = data_utils.rand_name('security-description')
        _, update_body = self.client.update_security_group(
            group_create_body['security_group']['id'],
            name=new_name,
            description=new_description)
        # Verify if security group is updated
        self.assertEqual(update_body['security_group']['name'], new_name)
        self.assertEqual(update_body['security_group']['description'],
                         new_description)
        # Show details of the updated security group
        resp, show_body = self.client.show_security_group(
            group_create_body['security_group']['id'])
        self.assertEqual(show_body['security_group']['name'], new_name)
        self.assertEqual(show_body['security_group']['description'],
                         new_description)
    @test.attr(type='smoke')
    def test_create_show_delete_security_group_rule(self):
        """Create one rule per protocol, then show and list them."""
        group_create_body, _ = self._create_security_group()
        # Create rules for each protocol
        protocols = ['tcp', 'udp', 'icmp']
        for protocol in protocols:
            _, rule_create_body = self.client.create_security_group_rule(
                security_group_id=group_create_body['security_group']['id'],
                protocol=protocol,
                direction='ingress',
                ethertype=self.ethertype
            )
            # Show details of the created security rule
            _, show_rule_body = self.client.show_security_group_rule(
                rule_create_body['security_group_rule']['id']
            )
            create_dict = rule_create_body['security_group_rule']
            for key, value in six.iteritems(create_dict):
                self.assertEqual(value,
                                 show_rule_body['security_group_rule'][key],
                                 "%s does not match." % key)
            # List rules and verify created rule is in response
            _, rule_list_body = self.client.list_security_group_rules()
            rule_list = [rule['id']
                         for rule in rule_list_body['security_group_rules']]
            self.assertIn(rule_create_body['security_group_rule']['id'],
                          rule_list)
    @test.attr(type='smoke')
    def test_create_security_group_rule_with_additional_args(self):
        """Verify security group rule with additional arguments works.
        direction:ingress, ethertype:[IPv4/IPv6],
        protocol:tcp, port_range_min:77, port_range_max:77
        """
        group_create_body, _ = self._create_security_group()
        sg_id = group_create_body['security_group']['id']
        direction = 'ingress'
        protocol = 'tcp'
        port_range_min = 77
        port_range_max = 77
        self._create_verify_security_group_rule(sg_id, direction,
                                                self.ethertype, protocol,
                                                port_range_min,
                                                port_range_max)
    @test.attr(type='smoke')
    def test_create_security_group_rule_with_icmp_type_code(self):
        """Verify security group rule for icmp protocol works.
        Specify icmp type (port_range_min) and icmp code
        (port_range_max) with different values. A separate testcase
        is added for icmp protocol as icmp validation would be
        different from tcp/udp.
        """
        group_create_body, _ = self._create_security_group()
        sg_id = group_create_body['security_group']['id']
        direction = 'ingress'
        protocol = 'icmp'
        icmp_type_codes = [(3, 2), (2, 3), (3, 0), (2, None)]
        for icmp_type, icmp_code in icmp_type_codes:
            self._create_verify_security_group_rule(sg_id, direction,
                                                    self.ethertype, protocol,
                                                    icmp_type, icmp_code)
    @test.attr(type='smoke')
    def test_create_security_group_rule_with_remote_group_id(self):
        # Verify creating security group rule with remote_group_id works
        sg1_body, _ = self._create_security_group()
        sg2_body, _ = self._create_security_group()
        sg_id = sg1_body['security_group']['id']
        direction = 'ingress'
        protocol = 'udp'
        port_range_min = 50
        port_range_max = 55
        remote_id = sg2_body['security_group']['id']
        self._create_verify_security_group_rule(sg_id, direction,
                                                self.ethertype, protocol,
                                                port_range_min,
                                                port_range_max,
                                                remote_group_id=remote_id)
    @test.attr(type='smoke')
    def test_create_security_group_rule_with_remote_ip_prefix(self):
        # Verify creating security group rule with remote_ip_prefix works
        sg1_body, _ = self._create_security_group()
        sg_id = sg1_body['security_group']['id']
        direction = 'ingress'
        protocol = 'tcp'
        port_range_min = 76
        port_range_max = 77
        ip_prefix = self._tenant_network_cidr
        self._create_verify_security_group_rule(sg_id, direction,
                                                self.ethertype, protocol,
                                                port_range_min,
                                                port_range_max,
                                                remote_ip_prefix=ip_prefix)
class SecGroupTestXML(SecGroupTest):
    """Same test suite as SecGroupTest, run over the XML interface."""
    _interface = 'xml'
class SecGroupIPv6Test(SecGroupTest):
    """SecGroupTest variant using IPv6 ethertype and tenant network."""
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    @classmethod
    def resource_setup(cls):
        # Skip before the parent setup runs so no IPv6 resources are
        # allocated when the deployment has IPv6 disabled.
        if not CONF.network_feature_enabled.ipv6:
            skip_msg = "IPv6 Tests are disabled."
            raise cls.skipException(skip_msg)
        super(SecGroupIPv6Test, cls).resource_setup()
class SecGroupIPv6TestXML(SecGroupIPv6Test):
    """IPv6 test suite run over the XML interface."""
    _interface = 'xml'
| |
import wx
import sqlite3
import wx.grid as gridlib
conn = sqlite3.connect('mydb.db')
########################################################################
class App1(wx.Frame):
    """Main window: three search fields plus a list of student records."""

    #----------------------------------------------------------------------
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, "List Control", size=(600,520))
        # Add a panel so it looks correct on all platforms
        panel = wx.Panel(self, wx.ID_ANY)
        self.index = 0
        fgs = wx.FlexGridSizer(4, 2, 9, 25)
        student_id = wx.StaticText(panel, label="Student ID")
        first_name = wx.StaticText(panel, label="First Name")
        last_name = wx.StaticText(panel, label="Last Name")
        blankLbl = wx.StaticText(panel, label="")
        self.tc1 = wx.TextCtrl(panel)
        self.tc2 = wx.TextCtrl(panel)
        self.tc3 = wx.TextCtrl(panel)
        searchbtn = wx.Button(panel, label="Search")
        searchbtn.Bind(wx.EVT_BUTTON, self.search_record)
        fgs.AddMany([(student_id), (self.tc1, 1, wx.EXPAND), (first_name),
                     (self.tc2, 1, wx.EXPAND), (last_name, 1, wx.EXPAND),
                     (self.tc3, 1, wx.EXPAND), (blankLbl, 1, wx.EXPAND),
                     (searchbtn, 1, wx.EXPAND)])
        fgs.AddGrowableRow(2, 1)
        fgs.AddGrowableCol(1, 1)
        self.list_ctrl = wx.ListCtrl(panel, size=(500,300),
                                     style=wx.LC_REPORT
                                     |wx.BORDER_SUNKEN
                                     )
        self.list_ctrl.InsertColumn(0, 'Student ID')
        self.list_ctrl.SetColumnWidth(0, 100)
        self.list_ctrl.InsertColumn(1, 'First Name')
        self.list_ctrl.SetColumnWidth(1, 100)
        self.list_ctrl.InsertColumn(2, 'Last Name')
        self.list_ctrl.SetColumnWidth(2, 100)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnRowClick, self.list_ctrl)
        btn = wx.Button(panel, label="Add New Record")
        btn.Bind(wx.EVT_BUTTON, self.add_new_record)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(fgs, proportion=1, flag=wx.ALL|wx.EXPAND, border=15)
        sizer.Add(btn, 0, wx.ALL|wx.CENTER, 5)
        sizer.Add(self.list_ctrl, 0, wx.ALL|wx.EXPAND, 5)
        panel.SetSizer(sizer)
        self.initFromDB()

    def _populate(self, rows):
        """Fill the list control from DB rows and rebuild the id index."""
        self.list_ctrl.DeleteAllItems()
        self.index = 0
        self.idSet = []
        for row in rows:
            self.list_ctrl.InsertStringItem(self.index, str(row[0]))
            self.list_ctrl.SetStringItem(self.index, 1, row[1])
            self.list_ctrl.SetStringItem(self.index, 2, row[2])
            self.idSet.append(row[0])
            self.index += 1

    def search_record(self, event):
        """Filter the list by whichever of the three fields are filled in.

        BUG FIX: the original referenced an undefined name
        'selectedStudentId' (NameError on every click) and discarded the
        query result. Now the query is built from the entered values and
        the list control shows the matching rows (all rows when every
        field is empty).
        """
        dbq = conn.cursor()
        clauses = []
        params = []
        for column, ctrl in (("id", self.tc1),
                             ("first_name", self.tc2),
                             ("last_name", self.tc3)):
            value = ctrl.GetValue()
            if value:
                clauses.append("%s = ?" % column)
                params.append(value)
        query = "SELECT * FROM student"
        if clauses:
            query += " WHERE " + " AND ".join(clauses)
        rows = dbq.execute(query, params).fetchall()
        self._populate(rows)

    def OnRowClick(self, event):
        """Open the edit form pre-filled with the clicked student's row."""
        selected_id = str(self.idSet[event.GetIndex()])
        dbq = conn.cursor()
        rs = dbq.execute("SELECT * FROM student WHERE id = ?", (selected_id,))
        dataSet = rs.fetchone()
        AppForm(self, dataSet)

    def initFromDB(self):
        """Load every student row from the database into the list."""
        dbq = conn.cursor()
        rows = dbq.execute("SELECT * FROM student").fetchall()
        self._populate(rows)

    def add_new_record(self, event):
        """Open the form in 'add' mode (no prefilled data)."""
        AppForm(self)

    #----------------------------------------------------------------------
    def add_line(self, event):
        """Append a dummy row (demo helper, not bound to any control)."""
        line = "Line %s" % self.index
        self.list_ctrl.InsertStringItem(self.index, line)
        self.list_ctrl.SetStringItem(self.index, 1, "01/19/2010")
        self.list_ctrl.SetStringItem(self.index, 2, "USA")
        self.index += 1
class AppForm(wx.Frame):
    """Add/edit dialog for one student record.

    When *editData* is a DB row (id, first_name, last_name) the form is
    pre-filled and the button updates the row; when it is False the form
    is empty and the button inserts a new row.
    """
    #----------------------------------------------------------------------
    def __init__(self, parent, editData = False):
        wx.Frame.__init__(self, None, wx.ID_ANY, size=(500,200))
        panel=wx.Panel(self, -1)
        # keep a handle on the main frame so we can refresh its list
        self.parent = parent
        self.editMode = False
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        fgs = wx.FlexGridSizer(4, 2, 9, 25)
        student_id = wx.StaticText(panel, label="Student ID")
        first_name = wx.StaticText(panel, label="First Name")
        last_name = wx.StaticText(panel, label="Last Name")
        blankLbl = wx.StaticText(panel, label="")
        self.tc1 = wx.TextCtrl(panel)
        self.tc2 = wx.TextCtrl(panel)
        self.tc3 = wx.TextCtrl(panel)
        if editData != False:
            # edit mode: prefill the fields from the given row
            self.tc1.SetValue(str(editData[0]))
            self.tc2.SetValue(editData[1])
            self.tc3.SetValue(editData[2])
            self.editMode = True
            managebtn = wx.Button(panel, label="Update")
            managebtn.Bind(wx.EVT_BUTTON, self.edit_record)
        else:
            managebtn = wx.Button(panel, label="Add")
            managebtn.Bind(wx.EVT_BUTTON, self.add_record)
        fgs.AddMany([(student_id), (self.tc1, 1, wx.EXPAND), (first_name),
            (self.tc2, 1, wx.EXPAND), (last_name, 1, wx.EXPAND), (self.tc3, 1, wx.EXPAND), (blankLbl, 1, wx.EXPAND), (managebtn, 1, wx.EXPAND)])
        #fgs.Add(addbtn)
        fgs.AddGrowableRow(2, 1)
        fgs.AddGrowableCol(1, 1)
        hbox.Add(fgs, proportion=1, flag=wx.ALL|wx.EXPAND, border=15)
        panel.SetSizer(hbox)
        self.SetBackgroundColour(wx.Colour(100,100,100))
        self.Centre()
        self.Show()
    def add_record(self, event):
        """Validate the fields, INSERT a new row, and refresh the parent."""
        try:
            dbq = conn.cursor()
            tc1 = self.tc1.GetValue()
            tc2 = self.tc2.GetValue()
            tc3 = self.tc3.GetValue()
            st = self.checkValidations({'tc1' : tc1, "tc2" : tc2, "tc3" : tc3})
            if st == True:
                qry = dbq.execute("INSERT INTO student VALUES (?, ?, ?) ", (self.tc1.GetValue(), self.tc2.GetValue(), self.tc3.GetValue()))
                conn.commit()
                self.parent.list_ctrl.DeleteAllItems()
                '''self.parent.list_ctrl.InsertColumn(0, 'Student ID')
                self.parent.list_ctrl.SetColumnWidth(0, 100)
                self.parent.list_ctrl.InsertColumn(1, 'First Name')
                self.parent.list_ctrl.SetColumnWidth(1, 100)
                self.parent.list_ctrl.InsertColumn(2, 'Last Name')
                self.parent.list_ctrl.SetColumnWidth(2, 100)'''
                self.parent.initFromDB()
                self.Close(True)
            #res = qry.fetchall()
        # NOTE(review): checkValidations handles its own ValueError and
        # returns False, so this handler looks unreachable in practice;
        # DB errors (sqlite3.Error) are NOT caught here -- confirm intent.
        except ValueError as e:
            self.Warn(self.parent, "Something issue")
    def edit_record(self, event):
        """UPDATE the row keyed by the Student ID field, refresh parent."""
        try:
            dbq = conn.cursor()
            tc1 = self.tc1.GetValue()
            tc2 = self.tc2.GetValue()
            tc3 = self.tc3.GetValue()
            # NOTE(review): validations are skipped in edit mode -- 'st' is
            # hard-coded True; presumably intentional since the id exists,
            # but an edited id is never re-checked. Confirm.
            st = True
            if st == True:
                qry = dbq.execute("UPDATE student SET first_name = ?, last_name = ? WHERE id = ? ", (self.tc2.GetValue(), self.tc3.GetValue(), self.tc1.GetValue()))
                conn.commit()
                self.parent.list_ctrl.DeleteAllItems()
                self.parent.initFromDB()
                self.Close(True)
        except ValueError as e:
            self.Warn(self.parent, "Something issue")
    def checkValidations(self, data):
        """Return True when the form data is insertable.

        Checks that the id is numeric and not already present; shows a
        warning dialog and returns False otherwise.
        """
        #print data.keys()
        #print type(data['tc1'])
        #print isinstance(data['tc1'], int)
        #print data['tc1'].isdigit()
        #print isinstance(tc1, int)
        #print isinstance(tc1, basestring)
        try:
            if data['tc1'].isdigit() == False:
                raise ValueError("Student ID must be number.")
            dbq = conn.cursor()
            rs = dbq.execute("SELECT * FROM student WHERE id = ?", (data['tc1'],))
            val = rs.fetchone()
            if val is not None:
                raise ValueError('Student ID is already exists ')
            return True
        except ValueError as e:
            self.Warn(self, str(e))
            return False
    def Warn(self, parent, message, caption = 'Warning!'):
        """Show a modal warning dialog with the given message."""
        dlg = wx.MessageDialog(parent, message, caption, wx.OK | wx.ICON_WARNING)
        dlg.ShowModal()
        dlg.Destroy()
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
    # Create the wx application, show the main frame, and hand control
    # to the GUI event loop (blocks until the window is closed).
    app = wx.App(False)
    frame = App1()
    frame.Show()
    app.MainLoop()
| |
## @package image
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
## @brief Contains some utility methods for handling images
from __future__ import division
from random import random
from PIL import Image
from .enhanced_grid import *
## Converts a sequence of floats (ranged 0 to 1) to integers (ranged 0 to 255).
def int_sequence(seq):
    return tuple(int(255 * element) for element in seq)
## Converts a sequence of integers (ranged 0 to 255) to floats (ranged 0 to 1).
def float_sequence(seq):
    return tuple(element / 255 for element in seq)
## Converts a 2D grid to an image on the disk.
# The grid must contain float values (ranged 0 to 1).
def grid_to_rgb_image(grid, fname):
    image = Image.new('RGBA', grid.dims)
    pixel_access = image.load()
    # Scale each float tuple to 0..255 integers before writing the pixel.
    for cell_index in grid.index_iter():
        pixel_access[cell_index] = int_sequence(grid[cell_index])
    image.save(fname)
## Converts a 2D grid to an image on the disk.
# The grid must contain float values (ranged 0 to 1).
def grid_to_greyscale_image(grid, fname):
    image = Image.new('RGBA', grid.dims)
    pixel_access = image.load()
    for cell_index in grid.index_iter():
        pixel_access[cell_index] = int_sequence(grid[cell_index])
    # Collapse to luminance+alpha before writing to disk.
    image.convert("LA").save(fname)
## Loads an image from disk, and returns a tuplet of grids, one for each channel.
# The alpha channel is ignored.
def rgb_image_to_image_grid_channels(fname):
    image = Image.open(fname)
    pixel_access = image.load()
    red_grid = Grid2D(image.size, 0)
    green_grid = Grid2D(image.size, 0)
    blue_grid = Grid2D(image.size, 0)
    for cell_index in red_grid.index_iter():
        # Normalise each channel from 0..255 to 0..1.
        pixel = pixel_access[cell_index]
        red_grid[cell_index] = pixel[0] / 255
        green_grid[cell_index] = pixel[1] / 255
        blue_grid[cell_index] = pixel[2] / 255
    return (red_grid, green_grid, blue_grid)
## Converts a list of three grids into an RGB grid. The three grids each
# represents a color channel, and should contain floating point values between
# 0 and 1.
# The resulting grid contains tuples, each with four elements, between 0 and 1,
# representing RGBA. Alpha is always set to 1.
def channels_to_rgb_grid(grids):
    reds, greens, blues = grids
    rgb_grid = Grid2D(reds.dims)
    for cell_index in rgb_grid.index_iter():
        rgb_grid[cell_index] = (reds[cell_index], greens[cell_index],
                                blues[cell_index], 1)
    return rgb_grid
## Loads an image from disk into a grid.
# Each cell holds the pixel's channels as floats in [0, 1].
def rgb_image_to_grid(fname):
    image = Image.open(fname)
    pixel_access = image.load()
    result = Grid2D(image.size, 0)
    for cell_index in result.index_iter():
        result[cell_index] = float_sequence(pixel_access[cell_index])
    return result
## Take a grid containing non-negative integers, and makes a new grid with
# elements from the pallet. If the old grid had an element 3 in a cell, then
# the element in the corresponding cell in the new grid will hold the value of
# the 4th element of the pallet.
def index_grid_to_rgb_grid(grid, pallet):
    colored = Grid2D(grid.dims)
    for cell_index in colored.index_iter():
        # Each stored integer is a lookup into the pallet sequence.
        colored[cell_index] = pallet[grid[cell_index]]
    return colored
## Returns a new grid with the axes swapped: cell (x, y) of the input
# appears at (y, x) of the output.
def transpose(grid):
    width, height = grid.dims
    flipped = Grid2D((height, width))
    for x, y in grid.index_iter():
        flipped[y, x] = grid[x, y]
    return flipped
## Converts a greyscale grid (every cell contains a float [0..1]) into an RGB
# grid. The given color is multiplied by the value in the grid to give the value
# of the corresponding cell in the new grid.
def greyscale_grid_to_rgb_grid(grid, color):
    red, green, blue, alpha = color
    tinted = Grid2D(grid.dims, None)
    for cell_index in grid.index_iter():
        level = grid[cell_index]
        # Alpha is passed through unscaled.
        tinted[cell_index] = (level * red, level * green, level * blue, alpha)
    return tinted
## Converts an RGBA grid to a greyscale grid by averaging the three
# color channels (alpha is discarded).
def rgb_grid_to_greyscale_grid(grid):
    grey = Grid2D(grid.dims, None)
    for cell_index in grid.index_iter():
        red, green, blue, _alpha = grid[cell_index]
        grey[cell_index] = (red + green + blue) / 3
    return grey
## An extremely simple edge detection algorithm.
# For every pixel (except those in row 0 and column 0) the output grey
# value is the summed absolute difference between the pixel's RGB
# channels and those of its left, top, and top-left neighbours.
def edge(grid):
    # Row 0 / column 0 keep this default opaque black value.
    new_grid = Grid2D(grid.dims, (0, 0, 0, 1))
    # Start at 1 so the [i-1]/[j-1] neighbour lookups stay in range.
    for i in range(1, grid.dims[0]):
        for j in range(1, grid.dims[1]):
            red, green, blue, alpha = grid[i, j]
            red1, green1, blue1, alpha1 = grid[i - 1, j]
            red2, green2, blue2, alpha2 = grid[i, j - 1]
            red3, green3, blue3, alpha3 = grid[i - 1, j - 1]
            # Per-channel total variation against the three neighbours.
            redd = abs(red - red1) + abs(red - red2) + abs(red - red3)
            greend = abs(green - green1) + abs(green - green2) + abs(green - green3)
            blued = abs(blue - blue1) + abs(blue - blue2) + abs(blue - blue3)
            # NOTE(review): result is deliberately not rescaled (the /8
            # normalisation was commented out), so values may exceed 1.
            avr = (redd + greend + blued)#/8
            new_grid[i, j] = (avr, avr, avr, 1)
    return new_grid
## Returns a grid in which the colours have been normalised.
# Colours are adjusted so that the returned grid has a minimum
# color component of 0, and a maximum component of 1.
def normalize(grid):
    # BUG FIX: the original seeded the range with max(grid[0, 0]) /
    # min(grid[0, 0]), which takes the extreme over ALL four channels of
    # the first cell -- including alpha -- while the loop below only
    # considers RGB. With a typical alpha of 1 that forced max_lum >= 1
    # and skewed the normalisation. Seed from the RGB channels only.
    first_red, first_green, first_blue, _first_alpha = grid[0, 0]
    max_lum = max(first_red, first_green, first_blue)
    min_lum = min(first_red, first_green, first_blue)
    for cell in grid.cell_iter():
        red, green, blue, alpha = cell
        max_lum = max(red, green, blue, max_lum)
        min_lum = min(red, green, blue, min_lum)
    if max_lum == min_lum:
        # Flat image: avoid division by zero and map everything as-is.
        multiplier = 1
        min_lum = 0
        max_lum = 0
    else:
        multiplier = 1 / (max_lum - min_lum)
    new_grid = Grid2D(grid.dims)
    for index in grid.index_iter():
        red, green, blue, alpha = grid[index]
        red = (red - min_lum) * multiplier
        green = (green - min_lum) * multiplier
        blue = (blue - min_lum) * multiplier
        new_grid[index] = (red, green, blue, alpha)
    return new_grid
## Returns a grid where all values have been normalised between 0 and 1.
def normalize_grey(grid):
    lo = hi = grid[0, 0]
    for cell in grid.cell_iter():
        if cell > hi:
            hi = cell
        if cell < lo:
            lo = cell
    if hi == lo:
        # Flat grid: avoid division by zero; values pass through scaled by 1.
        scale = 1
        lo = 0
        hi = 0
    else:
        scale = 1 / (hi - lo)
    result = Grid2D(grid.dims)
    for cell_index in grid.index_iter():
        result[cell_index] = (grid[cell_index] - lo) * scale
    return result
## Clamps all values in a grid between 0 and 1
def saturate(grid):
    clamped = Grid2D(grid.dims)
    for cell_index in grid.index_iter():
        clamped[cell_index] = min(max(grid[cell_index], 0), 1)
    return clamped
## Binarises a grid: cells at or above *value* become 1, the rest 0.
def threshold(grid, value):
    binary = Grid2D(grid.dims)
    for cell_index in grid.index_iter():
        binary[cell_index] = int(grid[cell_index] >= value)
    return binary
## Multiplies all values of a grid with a constant. Returns the result as a new grid.
def multiply_grid(grid, factor):
    scaled = Grid2D(grid.dims)
    for cell_index in grid.index_iter():
        scaled[cell_index] = grid[cell_index] * factor
    return scaled
## Adds a constant to all values of a grid. Returns the result as a new grid.
def add_grid(grid, summand):
    shifted = Grid2D(grid.dims)
    for cell_index in grid.index_iter():
        shifted[cell_index] = grid[cell_index] + summand
    return shifted
## Returns a grid that represents the entropy around the pixels in a given grid.
# The entropy around a pixel is measured as the sum of the absolute differences
# between that pixel's channels and the surrounding pixels' corresponding channels.
#
# @param grid The grid to measure
#
# @param n Determines the size of the window around each pixel to use. If n is 1,
# the window size is 3by3, if it is 2, the window size is 5 by 5.
def entropy(grid, n):
    new_grid = Grid2D(grid.dims, (0, 0, 0, 1))
    for index in grid.index_iter():
        red, green, blue, alpha = grid[index]
        red_sum, green_sum, blue_sum = 0, 0, 0
        # square_iter yields every cell in the (2n+1)-wide window centred
        # on this index; the centre cell contributes 0 to each sum.
        for cell in grid.square_iter(index, n):
            red1, green1, blue1, alpha1 = cell
            red_sum += abs(red - red1)
            green_sum += abs(green - green1)
            blue_sum += abs(blue - blue1)
        # NOTE(review): sums are not normalised by window size, so values
        # can exceed 1; the source pixel's alpha is carried through.
        new_grid[index] = (red_sum, green_sum, blue_sum, alpha)
    return new_grid
##@brief Returns a grid containing white noise in the range [0 1]
# @param dims
#    The dimensions of the grid to return (as a xy-tuple).
# @param period
#    This is the period of different noise samples. For example,
#    if the period is 2, then 2-by-2 windows in the grid will have
#    the same value.
def white_noise(dims, period=1):
    noise = Grid2D(dims)
    width, height = dims
    for x0 in range(0, width, period):
        for y0 in range(0, height, period):
            sample = random()
            # Stamp the same sample across the whole period-by-period window.
            for cell_index in noise.window_index_iter((x0, y0),
                                                      (x0 + period, y0 + period)):
                noise[cell_index] = sample
    return noise
## Interpolates a grey between two given greys.
# If t is set to 0, the returned grey is the same
# as col1. If t is set to 1, the returned grey
# is the same as col2.
def mix_grey(col1, col2, t):
    # Standard linear interpolation.
    return col1 * (1 - t) + col2 * t
## Interpolates a color between two given colors.
# If t is set to 0, the returned color is the same
# as color0. If t is set to 1, the returned color
# is the same as color1.
def mix_color(color0, color1, t):
    # Channel-wise linear interpolation; extra channels of the longer
    # color are dropped by zip, matching the original loop.
    return [channel0 * (1 - t) + channel1 * t
            for channel0, channel1 in zip(color0, color1)]
## Returns a new grid, containing m by n copies
# of the given grid.
def tile(grid, m, n):
    src_width, src_height = grid.dims
    tiled = Grid2D((src_width * m, src_height * n))
    for cell_index in tiled.index_iter():
        col, row = cell_index
        # Wrap coordinates back into the source grid.
        tiled[cell_index] = grid[col % src_width, row % src_height]
    return tiled
## Adds weighted values of grids together.
# @param grids   non-empty sequence of same-sized greyscale grids
# @param factors optional per-grid weights; defaults to 1 for each grid
def add_grey_grids(grids, factors=None):
    new_grid = Grid2D(grids[0].dims, 0)
    # Idiom fix: compare against None with 'is', not '=='.
    if factors is None:
        factors = [1] * len(grids)
    for grid, factor in zip(grids, factors):
        for index in grid.index_iter():
            new_grid[index] += grid[index] * factor
    return new_grid
## Calculates the integral grid (summed-area table) of a grid: each cell
# holds the sum of all input cells at or above-left of it.
def integrate(grid):
    new_grid = Grid2D(grid.dims, 0)
    new_grid[0, 0] = grid[0, 0]
    # First row: running sum along x. ('range' also replaces the
    # Python-2-only 'xrange' used before.)
    for i in range(1, grid.width):
        new_grid[i, 0] = new_grid[i - 1, 0] + grid[i, 0]
    # First column: running sum along y.
    # BUG FIX: the original read new_grid[i, j - 1] here, reusing the
    # leftover loop variable i (== grid.width - 1) instead of column 0,
    # which corrupted the first column and everything derived from it.
    for j in range(1, grid.height):
        new_grid[0, j] = new_grid[0, j - 1] + grid[0, j]
    # Interior: standard summed-area recurrence.
    for i in range(1, grid.width):
        for j in range(1, grid.height):
            new_grid[i, j] = (new_grid[i - 1, j] + new_grid[i, j - 1]
                              - new_grid[i - 1, j - 1] + grid[i, j])
    return new_grid
def integrate_vertically(grid):
    ## Returns a grid of running column sums: out[i, j] = sum of grid[i, 0..j].
    width, height = grid.dims
    result = Grid2D((width, height), 0)
    for col in range(width):
        result[col, 0] = grid[col, 0]
    for col in range(width):
        for row in range(1, height):
            result[col, row] = grid[col, row] + result[col, row - 1]
    return result
def blend(grid1, grid2, blend_image):
    ## Mixes two grids cell-wise, weighted by the values of blend_image.
    result = Grid2D(blend_image.dims)
    for idx in result.index_iter():
        result[idx] = mix_color(grid1[idx], grid2[idx], blend_image[idx])
    return result
| |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions shared by various parts of the code generator.
Extends IdlTypeBase type with |enum_validation_expression| property.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import re
from idl_types import IdlTypeBase
import idl_types
from idl_definitions import Exposure, IdlInterface
from v8_globals import includes
import v8_types
# Acronyms that may begin an identifier; capitalize()/uncapitalize() use this
# list to re-case the whole acronym rather than just its first letter.
ACRONYMS = [
    'CSSOM', # must come *before* CSS to match full acronym
    'CSS',
    'HTML',
    'IME',
    'JS',
    'SVG',
    'URL',
    'WOFF',
    'XML',
    'XSLT',
]
################################################################################
# Extended attribute parsing
################################################################################
def extended_attribute_value_contains(extended_attribute_value, key):
    """Returns True if the value equals |key| or is a list containing |key|."""
    if extended_attribute_value == key:
        return True
    return (isinstance(extended_attribute_value, list) and
            key in extended_attribute_value)
def has_extended_attribute(definition_or_member, extended_attribute_list):
    """Returns True if any of the listed extended attributes is present."""
    present = definition_or_member.extended_attributes
    return any(attribute in present for attribute in extended_attribute_list)
def has_extended_attribute_value(definition_or_member, name, value):
    """Returns True if extended attribute |name| is present and holds |value|."""
    extended_attributes = definition_or_member.extended_attributes
    if name not in extended_attributes:
        return False
    return extended_attribute_value_contains(extended_attributes[name], value)
def extended_attribute_value_as_list(definition_or_member, name):
    """Returns extended attribute |name|'s value normalized to a list, or None."""
    extended_attributes = definition_or_member.extended_attributes
    try:
        value = extended_attributes[name]
    except KeyError:
        return None
    return value if isinstance(value, list) else [value]
################################################################################
# String handling
################################################################################
def capitalize(name):
    """Capitalize first letter or initial acronym (used in setter names)."""
    for acronym in ACRONYMS:
        lowered = acronym.lower()
        if name.startswith(lowered):
            # NOTE: replace() re-cases every occurrence, matching the
            # original behavior, not just the leading one.
            return name.replace(lowered, acronym)
    return name[0].upper() + name[1:]
def strip_suffix(string, suffix):
    """Returns |string| with a trailing |suffix| removed, when present."""
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
def uncapitalize(name):
    """Uncapitalizes first letter or initial acronym (used in method names).
    E.g., 'SetURL' becomes 'setURL', but 'URLFoo' becomes 'urlFoo'.
    """
    for acronym in ACRONYMS:
        if not name.startswith(acronym):
            continue
        return name.replace(acronym, acronym.lower())
    return name[0].lower() + name[1:]
################################################################################
# C++
################################################################################
def enum_validation_expression(idl_type):
    """Returns a C++ expression validating |string| against the enum's values."""
    # FIXME: Add IdlEnumType, move property to derived type, and remove this check
    if not idl_type.is_enum:
        return None
    checks = ['string == "%s"' % enum_value
              for enum_value in idl_type.enum_values]
    return ' || '.join(checks)
# Expose as a read-only property on all IDL types (monkey-patches IdlTypeBase).
IdlTypeBase.enum_validation_expression = property(enum_validation_expression)
def scoped_name(interface, definition, base_name):
    """Returns the C++ expression used to reach |base_name| for |definition|."""
    if 'ImplementedInPrivateScript' in definition.extended_attributes:
        return '%s::PrivateScript::%s' % (v8_class_name(interface), base_name)
    # partial interfaces are implemented as separate classes, with their members
    # implemented as static member functions
    partial_implemented_as = definition.extended_attributes.get('PartialInterfaceImplementedAs')
    if partial_implemented_as:
        return '%s::%s' % (partial_implemented_as, base_name)
    if definition.is_static or definition.name in ('Constructor', 'NamedConstructor'):
        return '%s::%s' % (cpp_name(interface), base_name)
    return 'impl->%s' % base_name
def v8_class_name(interface):
    """Returns the name of the generated V8 binding class for |interface|."""
    return v8_types.v8_type(interface.name)
def v8_class_name_or_partial(interface):
    """Returns the V8 class name, suffixed 'Partial' for partial interfaces."""
    name = v8_class_name(interface)
    if not interface.is_partial:
        return name
    return name + 'Partial'
################################################################################
# Specific extended attributes
################################################################################
# [ActivityLogging]
def activity_logging_world_list(member, access_type=''):
    """Returns a set of world suffixes for which a definition member has activity logging, for specified access type.
    access_type can be 'Getter' or 'Setter' if only checking getting or setting.
    """
    extended_attributes = member.extended_attributes
    if 'LogActivity' not in extended_attributes:
        return set()
    log_activity = extended_attributes['LogActivity']
    # A non-empty value restricts logging to one access type.
    if log_activity and not log_activity.startswith(access_type):
        return set()
    includes.add('bindings/core/v8/V8DOMActivityLogger.h')
    worlds = set([''])  # At minimum, include isolated worlds.
    if 'LogAllWorlds' in extended_attributes:
        worlds.add('ForMainWorld')
    return worlds
# [ActivityLogging]
def activity_logging_world_check(member):
    """Returns if an isolated world check is required when generating activity
    logging code.
    The check is required when there is no per-world binding code and logging is
    required only for isolated world.
    """
    extended_attributes = member.extended_attributes
    return ('LogActivity' in extended_attributes and
            'PerWorldBindings' not in extended_attributes and
            'LogAllWorlds' not in extended_attributes)
# [CallWith]
# Maps each [CallWith] identifier to the C++ expression that is passed as the
# corresponding argument in generated bindings code.
CALL_WITH_ARGUMENTS = {
    'ScriptState': 'scriptState',
    'ExecutionContext': 'executionContext',
    'ScriptArguments': 'scriptArguments.release()',
    'ActiveWindow': 'callingDOMWindow(info.GetIsolate())',
    'FirstWindow': 'enteredDOMWindow(info.GetIsolate())',
    'Document': 'document',
    'ThisValue': 'ScriptValue(scriptState, info.This())',
}
# List because key order matters, as we want arguments in deterministic order
CALL_WITH_VALUES = [
    'ScriptState',
    'ExecutionContext',
    'ScriptArguments',
    'ActiveWindow',
    'FirstWindow',
    'Document',
    'ThisValue',
]
def call_with_arguments(call_with_values):
    """Maps [CallWith] values to C++ argument expressions in canonical order."""
    if not call_with_values:
        return []
    arguments = []
    for value in CALL_WITH_VALUES:
        if extended_attribute_value_contains(call_with_values, value):
            arguments.append(CALL_WITH_ARGUMENTS[value])
    return arguments
# [Conditional]
# Maps an IDL value delimiter to the C++ boolean operator used when joining
# multiple conditionals. NOTE(review): not referenced by conditional_string
# below — presumably consumed elsewhere; verify before removing.
DELIMITER_TO_OPERATOR = {
    '|': '||',
    ',': '&&',
}
def conditional_string(definition_or_member):
    """Returns an ENABLE(...) guard for [Conditional], or None if absent."""
    try:
        condition = definition_or_member.extended_attributes['Conditional']
    except KeyError:
        return None
    return 'ENABLE(%s)' % condition
# [DeprecateAs]
def deprecate_as(member):
    """Returns the [DeprecateAs] counter name, adding the UseCounter include."""
    extended_attributes = member.extended_attributes
    if 'DeprecateAs' in extended_attributes:
        includes.add('core/frame/UseCounter.h')
        return extended_attributes['DeprecateAs']
    return None
# [Exposed]
# Maps an [Exposed] identifier to the ExecutionContext predicate that tests
# whether the current context is of that kind.
EXPOSED_EXECUTION_CONTEXT_METHOD = {
    'DedicatedWorker': 'isDedicatedWorkerGlobalScope',
    'ServiceWorker': 'isServiceWorkerGlobalScope',
    'SharedWorker': 'isSharedWorkerGlobalScope',
    'Window': 'isDocument',
    'Worker': 'isWorkerGlobalScope',
}
# The specific worker scopes collectively covered by the 'Worker' umbrella;
# used by ExposureSet._extended to expand/collapse worker exposure.
EXPOSED_WORKERS = set([
    'DedicatedWorker',
    'SharedWorker',
    'ServiceWorker',
])
class ExposureSet:
    """An ExposureSet is a collection of Exposure instructions."""
    def __init__(self, exposures=None):
        self.exposures = set(exposures) if exposures else set()
    def issubset(self, other):
        """Returns true if |self|'s exposure set is a subset of
        |other|'s exposure set. This function doesn't care about
        RuntimeEnabled."""
        mine = self._extended(set(e.exposed for e in self.exposures))
        theirs = self._extended(set(e.exposed for e in other.exposures))
        return mine.issubset(theirs)
    @staticmethod
    def _extended(target):
        # 'Worker' acts as an umbrella for the specific worker scopes;
        # expand in both directions so subset checks treat them as equal.
        if EXPOSED_WORKERS.issubset(target):
            return target | set(['Worker'])
        if 'Worker' in target:
            return target | EXPOSED_WORKERS
        return target
    def add(self, exposure):
        self.exposures.add(exposure)
    def __len__(self):
        return len(self.exposures)
    def __iter__(self):
        return iter(self.exposures)
    @staticmethod
    def _code(exposure):
        # Context-type check, optionally ANDed with a runtime feature check.
        context_check = ('context->%s()' %
                         EXPOSED_EXECUTION_CONTEXT_METHOD[exposure.exposed])
        if exposure.runtime_enabled is None:
            return context_check
        feature_check = ('RuntimeEnabledFeatures::%sEnabled()' %
                         uncapitalize(exposure.runtime_enabled))
        return '({0} && {1})'.format(context_check, feature_check)
    def code(self):
        if not self.exposures:
            return None
        # We use sorted here to deflake output.
        return ' || '.join(sorted(self._code(e) for e in self.exposures))
def exposed(member, interface):
    """Returns a C++ code that checks if a method/attribute/etc is exposed.
    When the Exposed attribute contains RuntimeEnabledFeatures (i.e.
    Exposed(Arguments) form is given), the code contains check for them as
    well.
    EXAMPLE: [Exposed=Window, RuntimeEnabledFeature=Feature1]
    => context->isDocument()
    EXAMPLE: [Exposed(Window Feature1, Window Feature2)]
    => context->isDocument() && RuntimeEnabledFeatures::feature1Enabled() ||
    context->isDocument() && RuntimeEnabledFeatures::feature2Enabled()
    """
    member_exposure = ExposureSet(
        extended_attribute_value_as_list(member, 'Exposed'))
    interface_exposure = ExposureSet(
        extended_attribute_value_as_list(interface, 'Exposed'))
    for exposure in member_exposure:
        if exposure.exposed not in EXPOSED_EXECUTION_CONTEXT_METHOD:
            raise ValueError('Invalid execution context: %s' % exposure.exposed)
    # Methods must not be exposed to a broader scope than their interface.
    if not member_exposure.issubset(interface_exposure):
        raise ValueError('Interface members\' exposure sets must be a subset of the interface\'s.')
    return member_exposure.code()
# [GarbageCollected], [WillBeGarbageCollected]
def gc_type(definition):
    """Returns the GC object category implied by the GC extended attributes."""
    extended_attributes = definition.extended_attributes
    if 'GarbageCollected' in extended_attributes:
        return 'GarbageCollectedObject'
    if 'WillBeGarbageCollected' in extended_attributes:
        return 'WillBeGarbageCollectedObject'
    return 'RefCountedObject'
# [ImplementedAs]
def cpp_name(definition_or_member):
    """Returns the C++ name, honoring the [ImplementedAs] extended attribute."""
    return definition_or_member.extended_attributes.get(
        'ImplementedAs', definition_or_member.name)
def cpp_name_from_interfaces_info(name, interfaces_info):
    """Looks up the implementing C++ class for |name|, defaulting to |name|."""
    info = interfaces_info.get(name, {})
    return info.get('implemented_as') or name
def cpp_name_or_partial(interface):
    """Returns the C++ class name, suffixed 'Partial' for partial interfaces."""
    base = cpp_name(interface)
    if not interface.is_partial:
        return base
    return base + 'Partial'
# [MeasureAs]
def measure_as(definition_or_member):
    """Returns the [MeasureAs] counter name, adding the UseCounter include."""
    extended_attributes = definition_or_member.extended_attributes
    if 'MeasureAs' in extended_attributes:
        includes.add('core/frame/UseCounter.h')
        return extended_attributes['MeasureAs']
    return None
# [PerContextEnabled]
def per_context_enabled_function_name(definition_or_member):
    """Returns the ContextFeatures predicate for [PerContextEnabled], or None."""
    extended_attributes = definition_or_member.extended_attributes
    if 'PerContextEnabled' not in extended_attributes:
        return None
    feature = uncapitalize(extended_attributes['PerContextEnabled'])
    return 'ContextFeatures::%sEnabled' % feature
# [RuntimeEnabled]
def runtime_enabled_function_name(definition_or_member):
    """Returns the name of the RuntimeEnabledFeatures function.
    The returned function checks if a method/attribute is enabled.
    Given extended attribute RuntimeEnabled=FeatureName, return:
    RuntimeEnabledFeatures::{featureName}Enabled
    """
    extended_attributes = definition_or_member.extended_attributes
    if 'RuntimeEnabled' not in extended_attributes:
        return None
    feature = uncapitalize(extended_attributes['RuntimeEnabled'])
    return 'RuntimeEnabledFeatures::%sEnabled' % feature
# [Unforgeable]
def is_unforgeable(interface, member):
    """Returns True if the interface or the member is marked [Unforgeable]."""
    return any('Unforgeable' in obj.extended_attributes
               for obj in (interface, member))
################################################################################
# Indexed properties
# http://heycam.github.io/webidl/#idl-indexed-properties
################################################################################
def indexed_property_getter(interface):
    """Returns the interface's indexed property getter, or None if absent."""
    # Find indexed property getter, if present; has form:
    # getter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1)
    for method in interface.operations:
        if ('getter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return method
    return None
def indexed_property_setter(interface):
    """Returns the interface's indexed property setter, or None if absent."""
    # Find indexed property setter, if present; has form:
    # setter RETURN_TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1, ARG_TYPE ARG2)
    for method in interface.operations:
        if ('setter' in method.specials and
                len(method.arguments) == 2 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return method
    return None
def indexed_property_deleter(interface):
    """Returns the interface's indexed property deleter, or None if absent."""
    # Find indexed property deleter, if present; has form:
    # deleter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG)
    for method in interface.operations:
        if ('deleter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return method
    return None
################################################################################
# Named properties
# http://heycam.github.io/webidl/#idl-named-properties
################################################################################
def named_property_getter(interface):
    """Returns the named property getter (naming anonymous ones), or None."""
    # Find named property getter, if present; has form:
    # getter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1)
    for method in interface.operations:
        if ('getter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            # Anonymous getters get a synthetic name for code generation.
            method.name = method.name or 'anonymousNamedGetter'
            return method
    return None
def named_property_setter(interface):
    """Returns the interface's named property setter, or None if absent."""
    # Find named property setter, if present; has form:
    # setter RETURN_TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1, ARG_TYPE ARG2)
    for method in interface.operations:
        if ('setter' in method.specials and
                len(method.arguments) == 2 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            return method
    return None
def named_property_deleter(interface):
    """Returns the interface's named property deleter, or None if absent."""
    # Find named property deleter, if present; has form:
    # deleter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG)
    for method in interface.operations:
        if ('deleter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            return method
    return None
# Attach the special-operation finders as read-only properties on IdlInterface
# (monkey-patching) so templates can query e.g. interface.named_property_getter.
IdlInterface.indexed_property_getter = property(indexed_property_getter)
IdlInterface.indexed_property_setter = property(indexed_property_setter)
IdlInterface.indexed_property_deleter = property(indexed_property_deleter)
IdlInterface.named_property_getter = property(named_property_getter)
IdlInterface.named_property_setter = property(named_property_setter)
IdlInterface.named_property_deleter = property(named_property_deleter)
| |
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
)
import pandas._testing as tm
class TestSeriesSortValues:
    """Tests for Series.sort_values: ordering, NaN placement, the ``ascending``
    argument's validation, inplace behavior, and Categorical dtypes."""
    def test_sort_values(self, datetime_series):
        # check indexes are reordered corresponding with the values
        ser = Series([3, 2, 4, 1], ["A", "B", "C", "D"])
        expected = Series([1, 2, 3, 4], ["D", "B", "A", "C"])
        result = ser.sort_values()
        tm.assert_series_equal(expected, result)
        # NaNs sort to the end by default (na_position="last")
        ts = datetime_series.copy()
        ts[:5] = np.NaN
        vals = ts.values
        result = ts.sort_values()
        assert np.isnan(result[-5:]).all()
        tm.assert_numpy_array_equal(result[:-5].values, np.sort(vals[5:]))
        # na_position
        result = ts.sort_values(na_position="first")
        assert np.isnan(result[:5]).all()
        tm.assert_numpy_array_equal(result[5:].values, np.sort(vals[5:]))
        # something object-type
        ser = Series(["A", "B"], [1, 2])
        # no failure
        ser.sort_values()
        # ascending=False
        ordered = ts.sort_values(ascending=False)
        expected = np.sort(ts.dropna().values)[::-1]
        tm.assert_almost_equal(expected, ordered.dropna().values)
        ordered = ts.sort_values(ascending=False, na_position="first")
        tm.assert_almost_equal(expected, ordered.dropna().values)
        # ascending=[False] should behave the same as ascending=False
        ordered = ts.sort_values(ascending=[False])
        expected = ts.sort_values(ascending=False)
        tm.assert_series_equal(expected, ordered)
        ordered = ts.sort_values(ascending=[False], na_position="first")
        expected = ts.sort_values(ascending=False, na_position="first")
        tm.assert_series_equal(expected, ordered)
        # invalid ascending values raise with specific messages
        msg = "ascending must be boolean"
        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending=None)
        msg = r"Length of ascending \(0\) must be 1 for Series"
        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending=[])
        msg = r"Length of ascending \(3\) must be 1 for Series"
        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending=[1, 2, 3])
        msg = r"Length of ascending \(2\) must be 1 for Series"
        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending=[False, False])
        msg = "ascending must be boolean"
        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending="foobar")
        # inplace=True
        ts = datetime_series.copy()
        return_value = ts.sort_values(ascending=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(ts, datetime_series.sort_values(ascending=False))
        tm.assert_index_equal(
            ts.index, datetime_series.sort_values(ascending=False).index
        )
        # GH#5856/5853
        # Series.sort_values operating on a view
        df = DataFrame(np.random.randn(10, 4))
        s = df.iloc[:, 0]
        msg = (
            "This Series is a view of some other array, to sort in-place "
            "you must create a copy"
        )
        with pytest.raises(ValueError, match=msg):
            s.sort_values(inplace=True)
    def test_sort_values_categorical(self):
        # Categorical Series sort by category order, not lexical order.
        c = Categorical(["a", "b", "b", "a"], ordered=False)
        cat = Series(c.copy())
        # sort in the categories order
        expected = Series(
            Categorical(["a", "a", "b", "b"], ordered=False), index=[0, 3, 1, 2]
        )
        result = cat.sort_values()
        tm.assert_series_equal(result, expected)
        cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        cat = Series(
            Categorical(
                ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True
            )
        )
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        res = cat.sort_values(ascending=False)
        exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        raw_cat1 = Categorical(
            ["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
        )
        raw_cat2 = Categorical(
            ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
        )
        s = ["a", "b", "c", "d"]
        df = DataFrame(
            {"unsort": raw_cat1, "sort": raw_cat2, "string": s, "values": [1, 2, 3, 4]}
        )
        # Cats must be sorted in a dataframe
        res = df.sort_values(by=["string"], ascending=False)
        exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
        tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
        assert res["sort"].dtype == "category"
        res = df.sort_values(by=["sort"], ascending=False)
        exp = df.sort_values(by=["string"], ascending=True)
        tm.assert_series_equal(res["values"], exp["values"])
        assert res["sort"].dtype == "category"
        assert res["unsort"].dtype == "category"
        # unordered cat, but we allow this
        df.sort_values(by=["unsort"], ascending=False)
        # multi-columns sort
        # GH#7848
        df = DataFrame(
            {"id": [6, 5, 4, 3, 2, 1], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
        )
        df["grade"] = Categorical(df["raw_grade"], ordered=True)
        df["grade"] = df["grade"].cat.set_categories(["b", "e", "a"])
        # sorts 'grade' according to the order of the categories
        result = df.sort_values(by=["grade"])
        expected = df.iloc[[1, 2, 5, 0, 3, 4]]
        tm.assert_frame_equal(result, expected)
        # multi
        result = df.sort_values(by=["grade", "id"])
        expected = df.iloc[[2, 1, 5, 4, 3, 0]]
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize(
        "original_list, sorted_list, ignore_index, output_index",
        [
            ([2, 3, 6, 1], [6, 3, 2, 1], True, [0, 1, 2, 3]),
            ([2, 3, 6, 1], [6, 3, 2, 1], False, [2, 1, 0, 3]),
        ],
    )
    def test_sort_values_ignore_index(
        self, inplace, original_list, sorted_list, ignore_index, output_index
    ):
        # GH 30114
        # ignore_index=True resets the result index to a default RangeIndex;
        # the source Series must be left untouched either way.
        ser = Series(original_list)
        expected = Series(sorted_list, index=output_index)
        kwargs = {"ignore_index": ignore_index, "inplace": inplace}
        if inplace:
            result_ser = ser.copy()
            result_ser.sort_values(ascending=False, **kwargs)
        else:
            result_ser = ser.sort_values(ascending=False, **kwargs)
        tm.assert_series_equal(result_ser, expected)
        tm.assert_series_equal(ser, Series(original_list))
    def test_sort_values_pos_args_deprecation(self):
        # https://github.com/pandas-dev/pandas/issues/41485
        # positional `axis` argument is deprecated in favor of keyword-only
        ser = Series([1, 2, 3])
        msg = (
            r"In a future version of pandas all arguments of Series\.sort_values "
            r"will be keyword-only"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.sort_values(0)
        expected = Series([1, 2, 3])
        tm.assert_series_equal(result, expected)
class TestSeriesSortingKey:
    """Tests for Series.sort_values with the ``key`` callable argument."""
    def test_sort_values_key(self):
        # the key transforms values before comparison but the original
        # values are what end up in the result
        series = Series(np.array(["Hello", "goodbye"]))
        tm.assert_series_equal(series.sort_values(axis=0), series)
        lowered = series.sort_values(axis=0, key=lambda x: x.str.lower())
        tm.assert_series_equal(lowered, series[::-1])
    def test_sort_values_key_nan(self):
        # NaNs keep their default placement whether or not a key is given
        series = Series(np.array([0, 5, np.nan, 3, 2, np.nan]))
        expected = series.iloc[[0, 4, 3, 1, 2, 5]]
        tm.assert_series_equal(series.sort_values(axis=0), expected)
        shifted = series.sort_values(axis=0, key=lambda x: x + 5)
        tm.assert_series_equal(shifted, expected)
        negated = series.sort_values(axis=0, key=lambda x: -x, ascending=False)
        tm.assert_series_equal(negated, expected)
| |
"""A script that contains all functions to do RNA-seq epistasis analysis."""
# important stuff:
import pandas as pd
import numpy as np
# Graphics
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.odr as odr
# labeller:
import gvars
from scipy.stats import gaussian_kde
from matplotlib import rc
# Configure matplotlib to render labels with LaTeX.
rc('text', usetex=True)
rc('text.latex', preamble=r'\usepackage{cmbright}')
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# JB's favorite Seaborn settings for notebooks
# NOTE(review): this dict rebinds the name `rc`, shadowing matplotlib's rc()
# imported above — any later rc(...) call in this module would fail; consider
# renaming to rc_params.
rc = {'lines.linewidth': 2,
      'axes.labelsize': 18,
      'axes.titlesize': 18,
      'axes.facecolor': 'DFDFE5'}
sns.set_context('paper', rc=rc)
sns.set_style("dark")
# Larger tick/legend text for figures
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['legend.fontsize'] = 16
# Project-wide genotype labels and epistasis model definitions.
genvar = gvars.genvars()
epi = gvars.epistasis()
def label(code1, code2):
    """A function to make epistasis labels given two code-letters."""
    fancy1 = genvar.fancy_mapping[code1]
    fancy2 = genvar.fancy_mapping[code2]
    return '{0} $>$ {1}'.format(fancy1, fancy2)
def find_overlap(genotypes, df, q=0.1, col='code'):
    """Given a list of genotypes, df and a q-value, find DEG common to all."""
    # keep only significant rows belonging to the requested genotypes
    sig = df[(df[col].isin(genotypes)) & (df.qval < q)]
    shared = []
    for target, group in sig.groupby('target_id'):
        # keep the gene only if every requested genotype is represented
        if len(group[col].unique()) == len(genotypes):
            shared.append(target)
    return shared
def find_STP(single_muts, double_mut, df, q=0.1):
    """
    Finds Shared Transcriptomic Phenotype among 2 single and a double mutant.
    Given 3 genotypes, find shared DE genes and return sliced dataframes.
    Params:
    single_muts - a list containing exactly two elements
    double_mut - a code for a double mutant
    df - a tidy dataframe. must contain columns 'target_id' and 'code'
    q - q-value significance threshold for differential expression
    Output:
    x, y, xy - Dataframes containing DE genes and all relevant information
    (Betas, q-values, etc...). First dataframe corresponds to single_muts[0],
    second dataframe corresponds to genotype single_muts[1] and third is the
    double mutant information.
    """
    # isinstance is the idiomatic type check and also accepts subclasses
    if not isinstance(single_muts, list):
        raise ValueError('single_muts must be of type list')
    if not isinstance(double_mut, str):
        raise ValueError('double_mut must be of type str')
    # find the overlapping gene list
    # BUG FIX: forward q; previously the default threshold was always used
    # regardless of the caller's q argument
    genes = find_overlap(single_muts + [double_mut], df, q=q)
    # extract the dataframes
    x = df[(df.target_id.isin(genes)) &
           (df.code == single_muts[0])]
    y = df[(df.target_id.isin(genes)) &
           (df.code == single_muts[1])]
    xy = df[(df.target_id.isin(genes)) &
            (df.code == double_mut)]
    # return the dataframes
    return x, y, xy
def f(B, x):
    """A linear function for the ODR."""
    return x * B
def perform_odr(add, dev, wadd, wdev):
    """
    A wrapper to calculate an ODR regression.
    params:
    -------
    add, dev - x and y axis of the regression
    wadd, wdev - standard deviations
    returns:
    an ODR object
    """
    linear_model = odr.Model(f)
    # RealData takes the standard deviations directly (sx/sy), unlike
    # odr.Data which would expect weights.
    regression_data = odr.RealData(add, dev, sx=wadd, sy=wdev)
    regression = odr.ODR(regression_data, linear_model, beta0=[0])
    return regression.run()
def ODR(singles, double, epistasis):
    """Find the ODR in epistasis plot between single muts and a double mut.
    Params:
    singles - a list of exactly two single-mutant dataframes (need columns
    'b' and 'se_b')
    double - the double-mutant dataframe
    epistasis - one of 'actual', 'xy=x', 'xy=y'
    Returns:
    the scipy.odr run() output for the requested model
    """
    # errors:
    if len(singles) != 2:
        raise ValueError('`singles` must be a list with two dataframes!')
    if not isinstance(double, pd.DataFrame):
        raise ValueError('`double` must be a dataframe!')
    try:
        epistasis = epistasis.lower()
    except AttributeError:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only non-strings lack .lower()
        raise ValueError('epistasis must be a string!')
    if epistasis not in ['actual', 'xy=x', 'xy=y']:
        raise ValueError('epistasis must be one of `actual`, `xy=x`, `xy=y`')
    # define the X-coordinate as the additive model of interaction
    X = singles[0].b.values + singles[1].b.values
    # standard error of the additive prediction
    wadd = np.sqrt(singles[1].se_b.values**2 + singles[0].se_b.values**2)
    if epistasis == 'actual':
        # deviation standard error pools all three genotypes
        wdev = double.se_b.values**2
        for single in singles:
            wdev += single.se_b.values**2
        wdev = np.sqrt(wdev)
        output = perform_odr(X, double.b.values - X, wadd=wadd, wdev=wdev)
    if epistasis == 'xy=x':
        # if XY = X, then XY - X - Y = -Y
        output = perform_odr(X, -singles[1].b.values, wadd=wadd,
                             wdev=singles[1].se_b.values)
    if epistasis == 'xy=y':
        # if XY = Y, then XY - X - Y = -X
        output = perform_odr(X, -singles[0].b.values, wadd=wadd,
                             wdev=singles[0].se_b.values)
    return output
def plot_epistasis_regression(X, slope, **kwargs):
    """Plot the ODR line."""
    # span slightly beyond the data range so the line covers every point
    lo, hi = X.min(), X.max()
    xs = np.linspace(lo - 0.1, hi + 0.1, 1000)
    plt.plot(xs, xs * slope, **kwargs)
def draw_bs_sample(n):
    """Draw a bootstrap sample from a 1D data set."""
    # sample n indices with replacement from 0..n-1
    indices = np.arange(0, n)
    return np.random.choice(indices, size=n)
def bootstrap(bframe, sebframe, epistasis='actual', nsim=1000):
    """
    Perform non-parametric bootstrapping for an epistasis ODR.
    Given a list of three numpy vectors containing betas and a separate list of
    vectors containing their standard errors, fit a model according to the
    `epistasis` parameter indicated and bootstrap it. The vectors MUST
    be provided in the order [X, Y, XY], where X is the first genotype, Y is
    the second genotype and XY is the double mutant.
    Params:
    bframe - a list of numpy vectors containing the betas for each genotype
    sebframe - a list of numpy vectors containing the se_b for each genotype
    epistasis - kind of model to simulate. One of:
    'actual', 'suppress', 'xy=x+y', 'xy=x', 'xy=y','xy=x=y'.
    nsim - number of iterations to be performed. Must be >0
    Output:
    s - numpy vector of the bootstrapped ODR slopes, one per iteration
    """
    nsim = int(nsim)
    # unpack
    xb, yb, xyb = bframe
    xseb, yseb, xyseb = sebframe
    s = np.zeros(nsim)
    # draw bootstrap repetitions
    for i in range(nsim):
        # sample data, keeping tuples paired:
        ind = draw_bs_sample(len(xb))
        currx = xb[ind]
        curry = yb[ind]
        currxy = xyb[ind]
        currsex = xseb[ind]
        currsey = yseb[ind]
        currsexy = xyseb[ind]
        # different bootstraps to do:
        # for the actual data, do a non-parametric bootstrap
        wadd = np.sqrt(currsex**2 + currsey**2)
        if epistasis == 'actual':
            X = currx + curry
            Y = currxy - X
            wdev = np.sqrt(wadd**2 + currsexy**2)
        elif epistasis == 'xy=x':
            X = currx + curry
            Y = -curry
            wdev = currsey
        elif epistasis == 'xy=y':
            X = currx + curry
            Y = -currx
            wdev = currsex
        # for all others, do a parametric bootstrap
        # because we know what the slope should be,
        # but we need to generate a structure to test
        # against. Non-parametric bootstrapping will
        # yield perfect lines every time.
        elif epistasis == 'xy=x+y':
            X = currx + curry
            Y = np.random.normal(0, wadd, len(X))
            wdev = wadd
        elif epistasis == 'xy=x=y':
            # flip a coin:
            # BUG FIX: randint(0, 1) always returned 0 (the high bound is
            # exclusive), so the Y branch was never taken; the 'suppress'
            # branch below correctly uses randint(0, 2).
            coin = np.random.randint(0, 2)
            # half the time use the X data
            # half the time use the Y
            if coin == 0:
                wadd = np.sqrt(2*currsex**2)
                wdev = currsex
                X = currx + np.random.normal(0, wadd, len(curry))
                Y = -1/2*currx + np.random.normal(0, wdev, len(currx))
            else:
                wadd = np.sqrt(2)*currsey
                wdev = currsey
                X = curry + np.random.normal(0, wadd, len(curry))
                Y = -1/2*curry + np.random.normal(0, wdev, len(curry))
        elif epistasis == 'suppress':
            # flip a coin:
            coin = np.random.randint(0, 2)
            # half the time use the X data
            # half the time use the Y
            if coin == 0:
                # NOTE(review): wadd is built from currsex although X comes
                # from the Y genotype here — possibly intentional symmetry,
                # possibly a typo for currsey; confirm against the model.
                wadd = np.sqrt(2)*currsex
                wdev = currsey
                X = curry + np.random.normal(0, wadd, len(curry))
                Y = -curry + np.random.normal(0, wdev, len(curry))
            else:
                wadd = np.sqrt(2)*currsex
                wdev = currsex
                X = currx + np.random.normal(0, wadd, len(currx))
                Y = -currx + np.random.normal(0, wdev, len(currx))
        # do calcs and store in vectors
        output = perform_odr(X, Y, wadd=wadd, wdev=wdev)
        # extract the slope from the output and store it
        s[i] = output.beta[0]
    return s
def bootstrap_regression(singles, double, df, epistasis='actual', nsim=100):
    """
    Perform a bootstrap regression for the desired epistatic model.
    Params:
    singles - a list of 2 genotypes that make up the double mutant
    double - a string containing the ID of the double mutant.
    df - a tidy dataframe. must have columns 'target_id', 'b', 'se_b', 'qval'
    'code', and 'genotype'
    epistasis - kind of model to simulate. One of:
    'actual', 'suppress', 'xy=x+y', 'xy=x', 'xy=y','xy=x=y'.
    nsim - number of simulations to perform
    Outputs:
    s - numpy vector containing all the ODR slope values from the bootstrap
    """
    nsim = int(nsim)
    x, y, xy = find_STP(singles, double, df)
    # assemble the beta and standard-error vectors in [X, Y, XY] order
    betas = [frame.b.values for frame in (x, y, xy)]
    errors = [frame.se_b.values for frame in (x, y, xy)]
    return bootstrap(betas, errors, epistasis=epistasis, nsim=nsim)
def epiplot(X, Y, Y_se, **kwargs):
    """
    Given two arrays, X and Y, plot the points.

    Params:
    X - predicted log-additive effects (1D array)
    Y - deviations from the log-additive effect (1D array)
    Y_se - standard error of Y, used to scale marker sizes
    plot_unbranched - if True, draw the theoretical y = -x/2 line
    beta - regression coefficient(s) to draw through the data; omit or pass
           None to skip drawing the fitted line
    s0 - base marker size
    cmap - colormap for the density-colored scatter
    ax - axis to draw on (a new figure is created when None)

    Output:
    ax - the matplotlib axis containing the plot
    """
    plot_unbranched = kwargs.pop('plot_unbranched', False)
    # BUGFIX: the old default was np.nan, which is truthy, so `if beta:`
    # drew a fitted line through NaN even when no beta was supplied (and
    # raised for multi-element arrays). Default to None instead.
    beta = kwargs.pop('beta', None)
    s0 = kwargs.pop('s0', 15)
    cmap = kwargs.pop('cmap', 'viridis')
    ax = kwargs.pop('ax', None)
    # Calculate the point density
    points = np.vstack([X, Y])
    z = gaussian_kde(points)(points)
    # plot:
    if ax is None:
        fig, ax = plt.subplots()
    if len(X) > 50:
        # crowded plot: color points by local density
        # ('' is a deprecated edgecolor spelling; 'none' is the valid form)
        ax.scatter(X, Y, c=z, s=s0/Y_se,
                   edgecolor='none', cmap=cmap, alpha=0.5)
    else:
        ax.scatter(X, Y, s=s0/np.sqrt(Y_se),
                   color='#33a02c', alpha=.9)
    if plot_unbranched:
        smoothX = np.linspace(X.min() - 0.5, X.max() + 0.5, 1000)
        plt.plot(smoothX, -1/2*smoothX, color='#1f78b4', ls='--',
                 label='Unbranched Pathway')
    # draw the fit only when a finite beta was actually provided
    if beta is not None and np.all(np.isfinite(np.atleast_1d(beta))):
        plot_epistasis_regression(X, beta, ls='-', lw=2.3,
                                  color='#33a02c', label='data fit')
    plt.xlabel(r'Predicted log-Additive Effect')
    plt.ylabel(r'Deviation from log-Additive Effect')
    plt.legend()
    return ax
def make_epiplot(singles, double, df, **kwargs):
    """
    Draw an epistasis plot of the data.

    Params:
    singles - a list of 2 genotypes that make up the double mutant
    double - a string containing the ID of the double mutant.
    df - tidy dataframe with the columns expected by `find_STP`
    kwargs - currently ignored; NOTE(review): consider forwarding to epiplot

    Output:
    x - tidy dataframe containing the DE gene data for singles[0]
    y - tidy dataframe containing the DE gene data for singles[1]
    xy - tidy dataframe containing the DE gene data for the double mutant
    ax - axis containing the plot
    """
    x, y, xy = find_STP(singles, double, df)
    # fit the observed data with orthogonal distance regression
    actual = ODR([x, y], xy, 'actual')
    # transform coordinates:
    # X = predicted log-additive effect, Y = deviation from it
    X = x.b.values + y.b.values
    Y = xy.b.values - X
    # propagate the three genotypes' standard errors into Y's error
    Y_se = np.sqrt(x.se_b.values**2 + y.se_b.values**2 + xy.se_b.values**2)
    ax = epiplot(X, Y, Y_se, plot_unbranched=True, beta=actual.beta)
    return x, y, xy, ax
def calculate_all_bootstraps(x, y, xy, df, nsim=5000):
    """
    Given two single mutants and a double, find the bootstrapped epistasis
    coefficients under every model in `epi.models`.

    Params:
    x - first single-mutant genotype
    y - second single-mutant genotype
    xy - double-mutant genotype
    df - tidy dataframe passed through to bootstrap_regression
    nsim - number of bootstrap iterations per model

    Output:
    epicoef - dict mapping each model name to its vector of bootstrapped
              ODR slopes
    """
    epicoef = {model: bootstrap_regression([x, y], xy, df,
                                           epistasis=model, nsim=nsim)
               for model in epi.models}
    return epicoef
def plot_bootstraps(x, y, epicoef, **kwargs):
    """
    Make KDE plots of the bootstrapped epistasis coefficients.

    Params:
    x, y - the single-mutant identifiers, used only to build plot labels
    epicoef - dict mapping each epistasis model name to its vector of
              bootstrapped slopes; must contain every model listed below
    kwargs - forwarded to sns.kdeplot

    Output:
    ax - axis containing the KDE plots

    Raises:
    ValueError - if epicoef is not a dict, lacks a model key, or holds
                 fewer than 10 bootstrap values for any model
    """
    # checks and balances -- validate before doing any plotting work
    if not isinstance(epicoef, dict):
        raise ValueError('epicoef must be a dictionary')
    epistasis_choice = ['actual', 'xy=x', 'xy=y', 'xy=x=y', 'xy=x+y',
                        'suppress']
    for epistasis in epistasis_choice:
        if epistasis.lower() not in epicoef.keys():
            warning = 'epicoef must contain keys for all epistasis models'
            raise ValueError(warning)
        if len(epicoef[epistasis.lower()]) < 10:
            # BUGFIX: the concatenated literals had no separating space
            # ('>100bootstraps')
            warning = 'too few bootstraps. Please perform >100 ' + \
                      'bootstraps per test'
            raise ValueError(warning)
    # make dictionaries for plotting
    colors = {'actual': '#33a02c', 'xy=x': 'blue', 'xy=y': 'k',
              'xy=x=y': '#1f78b4', 'xy=x+y': '#ff7f00', 'suppress': '#e31a1c'
              }
    labels = {'actual': 'data', 'xy=x': label(x, y),
              'xy=y': label(y, x), 'xy=x=y': 'Unbranched',
              'xy=x+y': 'log-Additive', 'suppress': 'Suppression'
              }
    fig, ax = plt.subplots()
    for model, s in epicoef.items():
        try:
            sns.kdeplot(data=s, label=labels[model.lower()],
                        color=colors[model.lower()], **kwargs)
        except KeyError:
            # unknown model: no label/color entry -- report and skip
            # (was a bare `except:` followed by a no-op bare `next`)
            print('{0} did not have a label'.format(model))
            continue
    # plot a vertical line wherever the actual data mean is
    plt.gca().axvline(epicoef['actual'].mean(), color='#33a02c', ls='--', lw=3)
    plt.xlabel('Epistasis Coefficient')
    plt.ylabel('Cumulative Density Function')
    return ax
def permutation_test(s):
    """Perform a permutation test on the slope of the genetic data.

    Params:
    s - dict mapping model names (including 'actual') to numpy vectors of
        bootstrapped slopes, as built by calculate_all_bootstraps

    Output:
    diff - dict mapping each non-'actual' model to the numpy vector of
           pairwise differences actual[i] - model[i]

    Note: differences are returned as numpy arrays (not lists) because
    calculate_pval boolean-masks them (`array[array > 0]`), which fails on
    plain Python lists. The loop variable was also renamed from `epi`,
    which shadowed the `epi` module used elsewhere in this file.
    """
    models = ['xy=x', 'xy=y', 'xy=x=y', 'xy=x+y', 'suppress']
    actual = np.asarray(s['actual'])
    diff = {}
    for model in models:
        other = np.asarray(s[model])
        # elementwise difference over the model's length (matches the
        # original range(len(s[model])) loop)
        diff[model] = actual[:len(other)] - other
    return diff
def message(name, pval, alpha=0.01):
    """Return a verdict string for a null-hypothesis test at level alpha."""
    if pval < alpha:
        template = '{0} can be rejected (pval <= {1:.2g})'
    else:
        template = '{0} cannot be rejected (pval = {1:.2g})'
    return template.format(name, pval)
def calculate_pval(s, diff):
    """
    Given `s` and `diff`, print out the p-values for each comparison.

    Params:
    s - dict mapping model names (including 'actual') to numpy vectors of
        bootstrapped slopes
    diff - dict of actual-minus-model difference vectors, as returned by
           permutation_test

    Output:
    None; prints one verdict line per model via `message`.
    """
    for key, values in diff.items():
        # BUGFIX: coerce to ndarray -- the boolean masking below raises a
        # TypeError when permutation_test hands back plain lists
        array = np.asarray(values)
        # one-sided test in the direction of the observed mean shift
        if s[key].mean() > s['actual'].mean():
            pval = len(array[array > 0])/len(array)
        else:
            pval = len(array[array < 0])/len(array)
        if pval == 0:
            # a zero count only bounds the p-value; report the sample's
            # resolution limit instead (same value as 1/(len(array)/10))
            p = 10/len(array)
        else:
            p = pval
        mess = message(key, p)
        print(mess)
| |
#!/usr/local/bin/python3
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import struct
from dateutil import parser
from client_messages import parse_client_message
from decoder_base import DecoderBase
from message_types import message_types
from numeric_conversion import to_hex_digit
class ClientMessageDecoder(DecoderBase):
    """Decode client->server message traces from a geode-native debug log.

    Watches log lines for TcrConnection::send and TcrMessage::addSecurityPart
    traces, parses the hex-encoded request header fields, and puts a
    dictionary describing each client message on the output queue.
    """

    def __init__(self, output_queue):
        super(ClientMessageDecoder, self).__init__(output_queue)
        # per-connection state-machine states
        self.STATE_NEUTRAL_ = 0
        self.STATE_FOUND_SECURITY_FOOTER_ = 1
        # selected lazily once the version banner is seen (search_for_version)
        self.send_trace_parts_retriever_ = None
        self.send_trace_parser_ = None
        self.connection_states_ = {}
        self.nc_version_ = None
        # The send-trace format differs between native client versions, so
        # the matching retriever/parser pair is looked up by version string.
        self.get_send_trace_parts_functions = {
            "0.0.42": self.get_send_trace_parts_base,
            "10.0.3": self.get_send_trace_parts_base,
            "10.1.1": self.get_send_trace_parts_base,
            "10.1.2": self.get_send_trace_parts_base,
            "10.1.3": self.get_send_trace_parts_base,
            "9.1.1": self.get_send_trace_parts_v911,
        }
        self.send_trace_parsers = {
            "0.0.42": self.parse_request_fields_base,
            "10.0.3": self.parse_request_fields_base,
            "10.1.1": self.parse_request_fields_base,
            "10.1.2": self.parse_request_fields_base,
            "10.1.3": self.parse_request_fields_base,
            "9.1.1": self.parse_request_fields_v911,
        }
        #
        # Native client code believes this is the list of messages that require a security footer.
        # We will use this list to verify and report if a message is sent that needs one but doesn't
        # have it, since this has been the source of at least one difficult-to-diagnose bug in the
        # past. To see the decision-making code that filters on this message list, look at
        # ThinClientBaseDM::beforeSendingRequest and TcrMessage::isUserInitiativeOps in geode-native
        # C++ code base.
        self.message_requires_security_part = [
            "ADD_PDX_ENUM",
            "ADD_PDX_TYPE",
            "CLIENT_READY",
            "CLOSE_CONNECTION",
            "COMMIT",
            "GETCQSTATS_MSG_TYPE",
            "GET_CLIENT_PARTITION_ATTRIBUTES",
            "GET_CLIENT_PR_METADATA",
            "GET_ENTRY",
            "GET_FUNCTION_ATTRIBUTES",
            "GET_PDX_ENUM_BY_ID",
            "GET_PDX_ID_FOR_ENUM",
            "GET_PDX_ID_FOR_TYPE",
            "GET_PDX_TYPE_BY_ID",
            "INVALID",
            "MAKE_PRIMARY",
            "MONITORCQ_MSG_TYPE",
            "PERIODIC_ACK",
            "PING",
            "REQUEST_EVENT_VALUE",
            # BUGFIX: the comma after "ROLLBACK" was missing, so Python fused
            # the adjacent literals into one entry "ROLLBACKSIZE", silently
            # dropping both real message names from this list.
            "ROLLBACK",
            "SIZE",
            "TX_FAILOVER",
            "TX_SYNCHRONIZATION",
            "USER_CREDENTIAL_MESSAGE",
        ]

    def search_for_version(self, line):
        """Latch the native-client version from the log banner line and pick
        the matching send-trace retriever and request-field parser."""
        if self.nc_version_ is None:  # was `== None`; use identity for None
            expression = re.compile(r"Product version:.*Native (\d+)\.(\d+)\.(\d+)-")
            match = expression.search(line)
            if match:
                major = match.group(1)
                minor = match.group(2)
                patch = match.group(3)
                self.nc_version_ = major + "." + minor + "." + patch
                self.send_trace_parts_retriever_ = self.get_send_trace_parts_functions[
                    self.nc_version_
                ]
                self.send_trace_parser_ = self.send_trace_parsers[self.nc_version_]

    def get_send_trace_parts_v911(self, line, parts):
        """Extract (timestamp, connection, decimal byte string) from a 9.1.1
        send trace; append them to `parts` and return True on a match."""
        result = False
        expression = re.compile(
            r"(\d\d:\d\d:\d\d\.\d+).*TcrConnection::send:\s*\[([\d|a-f|A-F|x|X]+).*sending request to endpoint.*bytes:\s*([\d| ]+)"
        )
        match = expression.search(line)
        if match:
            # BUGFIX: this called dateutil.parser.parse, but only `parser` is
            # imported (`from dateutil import parser`), so the v9.1.1 path
            # always raised NameError on the first matching line.
            parts.append(parser.parse(match.group(1)))
            parts.append(match.group(2))
            parts.append(match.group(3))
            result = True
        return result

    def get_send_trace_parts_base(self, line, parts):
        """Extract (timestamp, connection, hex byte string) from a modern
        send trace; append them to `parts` and return True on a match."""
        result = False
        expression = re.compile(
            r"(\d\d:\d\d:\d\d\.\d+).*TcrConnection::send:\s*\[([\d|a-f|A-F|x|X]+).*sending request to endpoint.*bytes:\s*([\d|a-f|A-F]+)"
        )
        match = expression.search(line)
        if match:
            parts.append(parser.parse(match.group(1)))
            parts.append(match.group(2))
            parts.append(match.group(3))
            result = True
        return result

    def get_send_trace_parts(self, line, parts):
        """Dispatch to the version-specific retriever; returns None until the
        version banner has been seen."""
        if self.send_trace_parts_retriever_ is not None:
            return self.send_trace_parts_retriever_(line, parts)

    def get_add_security_trace_parts(self, line, parts):
        """Extract (timestamp, connection, footer length, encrypted ID) from
        a TcrMessage::addSecurityPart trace; True on a match."""
        result = False
        expression = re.compile(
            r"(\d\d:\d\d:\d\d\.\d+).*TcrMessage::addSecurityPart\s*\[(0x[\d|a-f|A-F]*).*length\s*=\s*(\d+)\s*,\s*encrypted\s+ID\s*=\s*([\d|a-f|A-F]+)"
        )
        match = expression.search(line)
        if match:
            parts.append(parser.parse(match.group(1)))
            parts.append(match.group(2))
            parts.append(match.group(3))
            parts.append(match.group(4))
            result = True
        return result

    def decimal_string_to_hex_string(self, byte):
        """Convert one decimal byte string (e.g. "255") to two hex digits."""
        high_nibble = int(int(byte) / 16)
        low_nibble = int(byte) % 16
        return to_hex_digit[high_nibble] + to_hex_digit[low_nibble]

    def format_bytes_as_hex_v911(self, message_bytes):
        """9.1.1 logs bytes as space-separated decimals; rewrite them as one
        contiguous hex string so the common parsing code applies."""
        byte_list = message_bytes.split(" ")
        hex_string = ""
        for byte in byte_list:
            if byte:
                hex_string += self.decimal_string_to_hex_string(byte)
        return hex_string

    def parse_request_fields_v911(self, message_bytes):
        """Parse the fixed request header out of a 9.1.1 decimal byte dump."""
        hex_message_bytes = self.format_bytes_as_hex_v911(message_bytes)
        message_type = message_types[int(hex_message_bytes[0:8], 16)]
        message_length = int(hex_message_bytes[8:16], 16)
        message_number_of_parts = int(hex_message_bytes[16:24], 16)
        # transaction id is a signed 32-bit big-endian integer
        message_transaction_id = struct.unpack(
            ">i", bytes.fromhex(hex_message_bytes[24:32])
        )[0]
        # bit 1 of the flags byte marks "security header present"
        message_security_flag = (int(hex_message_bytes[32:34], 16) & 0x02) >> 1
        return (
            message_type,
            message_length,
            message_number_of_parts,
            message_transaction_id,
            message_security_flag,
        )

    def parse_request_fields_base(self, message_bytes):
        """Parse the fixed request header out of a hex byte dump."""
        message_type = message_types[int(message_bytes[0:8], 16)]
        message_length = int(message_bytes[8:16], 16)
        message_number_of_parts = int(message_bytes[16:24], 16)
        # transaction id is a signed 32-bit big-endian integer
        message_transaction_id = struct.unpack(
            ">i", bytes.fromhex(message_bytes[24:32])
        )[0]
        # bit 1 of the flags byte marks "security header present"
        message_security_flag = (int(message_bytes[32:34], 16) & 0x02) >> 1
        return (
            message_type,
            message_length,
            message_number_of_parts,
            message_transaction_id,
            message_security_flag,
        )

    def parse_request_fields(self, message_bytes):
        """Dispatch to the version-specific header parser; returns None until
        the version banner has been seen."""
        if self.send_trace_parser_ is not None:
            return self.send_trace_parser_(message_bytes)

    def request_requires_security_footer(self, message_type):
        """True if the native client must append a security footer to this
        message type before sending it."""
        return message_type in self.message_requires_security_part

    def process_line(self, line):
        """Consume one log line, updating per-connection state and emitting a
        decoded message dict on the output queue for each send trace."""
        connection = None
        is_send_trace = False
        is_add_security_trace = False
        send_trace = {}
        self.search_for_version(line)
        parts = []
        if self.get_send_trace_parts(line, parts):
            send_trace["Timestamp"], send_trace["Connection"], message_bytes = parts
            # NOTE(review): `connection` is left as None here, so every send
            # trace shares the single None slot in connection_states_ instead
            # of the connection parsed into send_trace["Connection"] -- looks
            # unintended; confirm before changing behavior.
            is_send_trace = True
        elif self.get_add_security_trace_parts(line, parts):
            timestamp, connection, security_footer_length, message_bytes = parts
            is_add_security_trace = True
        else:
            return
        if connection not in self.connection_states_:
            self.connection_states_[connection] = self.STATE_NEUTRAL_
        if self.connection_states_[connection] == self.STATE_NEUTRAL_:
            if is_add_security_trace:
                self.connection_states_[connection] = self.STATE_FOUND_SECURITY_FOOTER_
            elif is_send_trace:
                send_trace["Direction"] = "--->"
                (
                    send_trace["Type"],
                    send_trace["Length"],
                    send_trace["Parts"],
                    send_trace["TransactionId"],
                    send_trace["SecurityFlag"],
                ) = self.parse_request_fields(message_bytes)
                if (send_trace["SecurityFlag"] == 1) and (
                    self.request_requires_security_footer(str(send_trace["Type"]))
                ):
                    print(
                        "ERROR: Security flag is set, but no footer was added for this message!"
                    )
                parse_client_message(send_trace, message_bytes)
                self.output_queue_.put({"message": send_trace})
        elif self.connection_states_[connection] == self.STATE_FOUND_SECURITY_FOOTER_:
            if is_send_trace:
                send_trace["Direction"] = "--->"
                (
                    send_trace["Type"],
                    send_trace["Length"],
                    send_trace["Parts"],
                    send_trace["TransactionId"],
                    send_trace["SecurityFlag"],
                ) = self.parse_request_fields(message_bytes)
                # NOTE(review): the state never returns to STATE_NEUTRAL_
                # after this send trace is consumed -- confirm whether a
                # reset is intended here.
                self.output_queue_.put({"message": send_trace})
| |
import fnmatch
import re
import threading
from collections import defaultdict, OrderedDict
import botconfig
LANGUAGE = 'en'
MINIMUM_WAIT = 60
EXTRA_WAIT = 30
EXTRA_WAIT_JOIN = 0 # Add this many seconds to the waiting time for each !join
WAIT_AFTER_JOIN = 25 # Wait at least this many seconds after the last join
# !wait uses a token bucket
WAIT_TB_INIT = 2 # initial number of tokens
WAIT_TB_DELAY = 240 # wait time between adding tokens
WAIT_TB_BURST = 3 # maximum number of tokens that can be accumulated
STATS_RATE_LIMIT = 60
VOTES_RATE_LIMIT = 60
ADMINS_RATE_LIMIT = 300
GSTATS_RATE_LIMIT = 0
PSTATS_RATE_LIMIT = 0
TIME_RATE_LIMIT = 10
START_RATE_LIMIT = 10 # (per-user)
WAIT_RATE_LIMIT = 10 # (per-user)
SHOTS_MULTIPLIER = .12 # ceil(shots_multiplier * len_players) = bullets given
SHARPSHOOTER_MULTIPLIER = 0.06
MIN_PLAYERS = 4
MAX_PLAYERS = 24
DRUNK_SHOTS_MULTIPLIER = 3
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 90 # should be less than NIGHT_TIME_LIMIT
DAY_TIME_LIMIT = 720
DAY_TIME_WARN = 600 # should be less than DAY_TIME_LIMIT
JOIN_TIME_LIMIT = 3600
# May only be set if the above are also set
SHORT_DAY_PLAYERS = 6 # Number of players left to have a short day
SHORT_DAY_LIMIT = 520
SHORT_DAY_WARN = 400
# If time lord dies, the timers get set to this instead (60s day, 30s night)
TIME_LORD_DAY_LIMIT = 60
TIME_LORD_DAY_WARN = 45
TIME_LORD_NIGHT_LIMIT = 30
TIME_LORD_NIGHT_WARN = 20
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PM_WARN_IDLE_TIME = 240
PART_GRACE_TIME = 30
QUIT_GRACE_TIME = 60
ACC_GRACE_TIME = 30
START_QUIT_DELAY = 10
# controls how many people it does in one /msg; only works for messages that are the same
MAX_PRIVMSG_TARGETS = 4
# how many mode values can be specified at once; used only as fallback
MODELIMIT = 3
QUIET_DEAD_PLAYERS = False
DEVOICE_DURING_NIGHT = False
ALWAYS_PM_ROLE = False
QUIET_MODE = "q" # "q" or "b"
QUIET_PREFIX = "" # "" or "~q:"
ACCOUNT_PREFIX = "$a:" # "$a:" or "~a:"
# The bot will automatically toggle those modes of people joining
AUTO_TOGGLE_MODES = ""
DEFAULT_EXPIRY = "30d"
LEAVE_PENALTY = 0
LEAVE_EXPIRY = "30d"
IDLE_PENALTY = 0
IDLE_EXPIRY = "30d"
PART_PENALTY = 0
PART_EXPIRY = "30d"
ACC_PENALTY = 0
ACC_EXPIRY = "30d"
# If True, disallows adding stasis via !fstasis (requires warnings instead)
RESTRICT_FSTASIS = True
# The formatting of this sucks, sorry. This is used to automatically apply sanctions to warning levels
# When a user crosses from below the min threshold to min or above points, the listed sanctions apply
# Sanctions also apply while moving within the same threshold bracket (such as from min to max)
# Valid sanctions are deny, stasis, scalestasis, and tempban
# Scalestasis applies stasis equal to the formula ax^2 + bx + c, where x is the number of warning points
# Tempban number can either be a duration (ending in d, h, or m) or a number meaning it expires when
# warning points fall below that threshold.
AUTO_SANCTION = (
#min max sanctions
(5, 9, {"stasis": 1}),
(10, 14, {"stasis": 3}),
(15, 24, {"scalestasis": (0, 1, -10)}),
(25, 25, {"tempban": 15})
)
# The following is a bitfield, and they can be mixed together
# Defaults to none of these, can be changed on a per-game-mode basis
RESTRICT_WOLFCHAT = 0x00
### DO NOT CHANGE THESE!
### They are for easier code interpretation/modification
RW_DISABLE_NIGHT = 0x01 # Disable during night (commands are still relayed)
RW_DISABLE_DAY = 0x02 # Disable during day (commands are still relayed)
RW_ONLY_KILL_CMD = 0x04 # Only relay kill commands when wolfchat is disabled
RW_ONLY_SAME_CMD = 0x08 # Only relay commands to other people who have access to the same command
RW_WOLVES_ONLY_CHAT = 0x10 # Non-wolves cannot participate in wolfchat (commands still relayed as applicable)
RW_NO_INTERACTION = 0x20 # Do not relay commands to/from non-wolves regardless of other settings
RW_REM_NON_WOLVES = 0x40 # Remove non-wolves from wolfchat entirely (can be killed, do not count towards wolf win condition, do not show in wolflist, etc.)
RW_TRAITOR_NON_WOLF = 0x80 # Consider traitor as a non-wolf for the purposes of the above restrictions (if unset, traitor is treated the same as wolf cub)
ENABLE_DEADCHAT = True # dead players can communicate with each other
DYNQUIT_DURING_GAME = False # are dynamic quit messages used while a game is in progress? Note that true will break certain stats scrapers
GOAT_HERDER = True
ABSTAIN_ENABLED = True # whether village can !abstain in order to not vote anyone during day
LIMIT_ABSTAIN = True # if true, village will be limited to successfully !abstaining a vote only once
SELF_LYNCH_ALLOWED = True
HIDDEN_TRAITOR = True
HIDDEN_AMNESIAC = False # amnesiac still shows as amnesiac if killed even after turning
HIDDEN_CLONE = False
GUARDIAN_ANGEL_CAN_GUARD_SELF = True
START_WITH_DAY = False
WOLF_STEALS_GUN = True # at night, the wolf can steal the victim's bullets
ROLE_REVEAL = "on" # on/off/team - what role information is shown on death
STATS_TYPE = "default" # default/accurate/team/disabled - what role information is shown when doing !stats
LOVER_WINS_WITH_FOOL = False # if fool is lynched, does their lover win with them?
DEFAULT_SEEN_AS_VILL = True # non-wolves are seen as villager regardless of the default role
START_VOTES_SCALE = 0.3
START_VOTES_MAX = 4
# Debug mode settings, whether or not timers and stasis should apply during debug mode
DISABLE_DEBUG_MODE_TIMERS = True
DISABLE_DEBUG_MODE_TIME_LORD = False
DISABLE_DEBUG_MODE_REAPER = True
DISABLE_DEBUG_MODE_STASIS = True
# Minimum number of players needed for mad scientist to skip over dead people when determining who is next to them
# Set to 0 to always skip over dead players. Note this is number of players that !joined, NOT number of players currently alive
MAD_SCIENTIST_SKIPS_DEAD_PLAYERS = 16
# How likely a default game is replaced by a villagergame game, 1 = 100% 0 = 0%
# villagergame has no wolves, the bot kills someone each night
# village wins if and only if they can unanimously !vote the bot during the day
VILLAGERGAME_CHANCE = 0
# HIT MISS SUICIDE HEADSHOT
GUN_CHANCES = ( 5/7 , 1/7 , 1/7 , 2/5 )
WOLF_GUN_CHANCES = ( 5/7 , 1/7 , 1/7 , 2/5 )
DRUNK_GUN_CHANCES = ( 2/7 , 3/7 , 2/7 , 2/5 )
SHARPSHOOTER_GUN_CHANCES = ( 1 , 0 , 0 , 1 )
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 1/4
GUARDIAN_ANGEL_DIES_CHANCE = 0
BODYGUARD_DIES_CHANCE = 0
DETECTIVE_REVEALED_CHANCE = 2/5
SHARPSHOOTER_CHANCE = 1/5 # if sharpshooter is enabled, chance that a gunner will become a sharpshooter instead
FALLEN_ANGEL_KILLS_GUARDIAN_ANGEL_CHANCE = 1/2
# HALF FULL
PROPHET_REVEALED_CHANCE = ( 2/5 , 4/5 )
AMNESIAC_NIGHTS = 3 # amnesiac gets to know their actual role on this night
DOCTOR_IMMUNIZATION_MULTIPLIER = 0.135 # ceil(num_players * multiplier) = number of immunizations
TOTEM_ORDER = ( "shaman" , "crazed shaman" , "wolf shaman" )
TOTEM_CHANCES = { "death": ( 1 , 1 , 0 ),
"protection": ( 1 , 1 , 1 ),
"silence": ( 1 , 1 , 1 ),
"revealing": ( 1 , 1 , 0 ),
"desperation": ( 1 , 1 , 0 ),
"impatience": ( 1 , 1 , 1 ),
"pacifism": ( 1 , 1 , 1 ),
"influence": ( 1 , 1 , 0 ),
"narcolepsy": ( 0 , 1 , 0 ),
"exchange": ( 0 , 1 , 0 ),
"lycanthropy": ( 0 , 1 , 1 ),
"luck": ( 0 , 1 , 1 ),
"pestilence": ( 0 , 1 , 0 ),
"retribution": ( 0 , 1 , 1 ),
"misdirection": ( 0 , 1 , 1 ),
"deceit": ( 0 , 1 , 1 ),
}
GAME_MODES = {}
GAME_PHASES = ("night", "day") # all phases that constitute "in game", game modes can extend this with custom phases
ACCOUNTS_ONLY = False # If True, will use only accounts for everything
DISABLE_ACCOUNTS = False # If True, all account-related features are disabled. Automatically set if we discover we do not have proper ircd support for accounts
# This will override ACCOUNTS_ONLY if it is set
NICKSERV = "NickServ"
NICKSERV_IDENTIFY_COMMAND = "IDENTIFY {account} {password}"
NICKSERV_GHOST_COMMAND = "GHOST {nick}"
NICKSERV_RELEASE_COMMAND = "RELEASE {nick}"
NICKSERV_REGAIN_COMMAND = "REGAIN {nick}"
CHANSERV = "ChanServ"
CHANSERV_OP_COMMAND = "OP {channel}"
# TODO: move this to a game mode called "fixed" once we implement a way to randomize roles (and have that game mode be called "random")
DEFAULT_ROLE = "villager"
ROLE_INDEX = ( 4 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 15 , 16 , 18 , 20 , 21 , 23 , 24 )
ROLE_GUIDE = OrderedDict([ # This is order-sensitive - many parts of the code rely on this order!
# wolf roles
("wolf" , ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 2 , 2 , 3 , 3 , 3 )),
("alpha wolf" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("werecrow" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("werekitten" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf mystic" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf shaman" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("fallen angel" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("doomsayer" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wolf cub" , ( 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("traitor" , ( 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("hag" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )),
("sorcerer" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 )),
("warlock" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("minion" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("cultist" , ( 0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
# villager roles
("seer" , ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("oracle" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("harlot" , ( 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("shaman" , ( 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("hunter" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("vigilante" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("augur" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 )),
("detective" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("prophet" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("guardian angel" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("bodyguard" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 )),
("priest" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("doctor" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("mad scientist" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("mystic" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("matchmaker" , ( 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("village drunk" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("time lord" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("villager" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
# neutral roles
("jester" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("fool" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("crazed shaman" , ( 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("dullahan" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("monster" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 )),
("piper" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("amnesiac" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 )),
("turncoat" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("clone" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("lycan" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("wild child" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("vengeful ghost" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("succubus" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("demoniac" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
# templates
("cursed villager" , ( 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 2 , 2 )),
("blessed villager" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )),
("gunner" , ( 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 2 )),
# NB: for sharpshooter, numbers can't be higher than gunner, since gunners get converted to sharpshooters. This is the MAX number of gunners that can be converted.
("sharpshooter" , ( 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
("mayor" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 )),
("assassin" , ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 )),
])
# Harlot dies when visiting, seer sees as wolf, gunner kills when shooting, GA and bodyguard have a chance at dying when guarding
# If every wolf role dies, and there are no remaining traitors, the game ends and villagers win (monster may steal win)
WOLF_ROLES = frozenset({"wolf", "alpha wolf", "werecrow", "wolf cub", "werekitten", "wolf mystic", "wolf shaman", "fallen angel", "doomsayer"})
# Access to wolfchat, and counted towards the # of wolves vs villagers when determining if a side has won
WOLFCHAT_ROLES = WOLF_ROLES | {"traitor", "hag", "sorcerer", "warlock"}
# Wins with the wolves, even if the roles are not necessarily wolves themselves
WOLFTEAM_ROLES = WOLFCHAT_ROLES | {"minion", "cultist"}
# These roles either steal away wins or can otherwise win with any team
TRUE_NEUTRAL_ROLES = frozenset({"crazed shaman", "fool", "jester", "monster", "clone", "piper", "turncoat", "succubus", "demoniac", "dullahan"})
# These are the roles that will NOT be used for when amnesiac turns, everything else is fair game! (var.DEFAULT_ROLE is also added if not in this set)
AMNESIAC_BLACKLIST = frozenset({"monster", "demoniac", "minion", "matchmaker", "clone", "doctor", "villager", "cultist", "piper", "dullahan", "wild child"})
# These roles are seen as wolf by the seer/oracle
SEEN_WOLF = WOLF_ROLES | {"monster", "mad scientist", "succubus"}
# These are seen as the default role (or villager) when seen by seer (this overrides SEEN_WOLF)
SEEN_DEFAULT = frozenset({"traitor", "hag", "sorcerer", "time lord", "villager", "cultist", "minion", "turncoat", "amnesiac",
"vengeful ghost", "lycan", "clone", "fool", "jester", "werekitten", "warlock", "piper", "demoniac"})
# These roles are notified that they are villager
HIDDEN_VILLAGERS = frozenset({"time lord"})
# These roles are notified that they are the default role. They also win alongside the default role barring other role-specific win conds.
HIDDEN_ROLES = frozenset({"vengeful ghost", "amnesiac"})
# These roles are win stealers, and are valid kills for vigilante
WIN_STEALER_ROLES = frozenset({"monster", "succubus", "demoniac", "piper", "fool"})
# these totems are beneficial for the *receiving* person, but can be detrimental to someone else acting on the receiver!
BENEFICIAL_TOTEMS = frozenset({"protection", "revealing", "desperation", "influence", "luck", "pestilence", "retribution"})
# The roles in here are considered templates and will be applied on TOP of other roles. The restrictions are a list of roles that they CANNOT be applied to
# NB: if you want a template to apply to everyone, list it here but make the restrictions an empty set. Templates not listed here are considered full roles instead
TEMPLATE_RESTRICTIONS = OrderedDict([
("cursed villager" , SEEN_WOLF | {"seer", "oracle", "fool", "jester", "priest"}),
("gunner" , WOLFTEAM_ROLES | {"fool", "lycan", "jester", "priest", "wild child"}),
("sharpshooter" , frozenset()), # the above gets automatically added to the set. this set is the list of roles that can be gunner but not sharpshooter
("mayor" , frozenset({"fool", "jester", "monster"})),
("assassin" , WOLF_ROLES | {"traitor", "seer", "augur", "oracle", "harlot", "detective", "bodyguard", "guardian angel", "lycan", "priest", "wild child"}),
("blessed villager" , frozenset(ROLE_GUIDE.keys()) - {"villager", "blessed villager", "mayor"}),
])
# make sharpshooter restrictions at least the same as gunner
TEMPLATE_RESTRICTIONS["sharpshooter"] |= TEMPLATE_RESTRICTIONS["gunner"]
# fallen angel can be assassin even though they are a wolf role
TEMPLATE_RESTRICTIONS["assassin"] -= {"fallen angel"}
# Roles listed here cannot be used in !fgame roles=blah. If they are defined in ROLE_GUIDE they may still be used.
DISABLED_ROLES = frozenset()
# Game modes that cannot be randomly picked or voted for
DISABLED_GAMEMODES = frozenset()
GIF_CHANCE = 1/50
FORTUNE_CHANCE = 1/25
ALL_FLAGS = frozenset("AaDdFjms")
RULES = (botconfig.CHANNEL + " channel rules: http://wolf.xnrand.com/rules")
GRAVEYARD_LOCK = threading.RLock()
WARNING_LOCK = threading.RLock()
WAIT_TB_LOCK = threading.RLock()
# vim: set sw=4 expandtab:
| |
"""
Numba-specific errors and warnings.
"""
import abc
import contextlib
import os
import sys
import warnings
import numba.core.config
import numpy as np
from collections import defaultdict
from numba.core.utils import chain_exception
from functools import wraps
from abc import abstractmethod
# Filled at the end
__all__ = []
class NumbaWarning(Warning):
    """
    Base category for all Numba compiler warnings.

    Formats the message (and optional source location) for display,
    optionally wrapping it in terminal color escapes.
    """
    def __init__(self, msg, loc=None, highlighting=True, ):
        self.msg = msg
        self.loc = loc
        # choose the highlighter: terminal colors, or the identity function
        if highlighting:
            highlight = termcolor().errmsg
        else:
            highlight = lambda text: text
        # include the source location, when one was supplied
        if loc:
            formatted = "%s\n%s\n" % (msg, loc.strformat())
        else:
            formatted = "%s" % (msg,)
        super(NumbaWarning, self).__init__(highlight(formatted))
class NumbaPerformanceWarning(NumbaWarning):
    """
    Warning category for when an operation might not be
    as fast as expected. Message/location formatting is
    inherited from NumbaWarning.
    """
class NumbaDeprecationWarning(NumbaWarning):
    """
    Warning category for use of a deprecated feature.
    Message/location formatting is inherited from NumbaWarning.
    """
class NumbaPendingDeprecationWarning(NumbaWarning):
    """
    Warning category for use of a feature that is pending deprecation.
    Message/location formatting is inherited from NumbaWarning.
    """
class NumbaParallelSafetyWarning(NumbaWarning):
    """
    Warning category for when an operation in a prange
    might not have parallel semantics. Message/location
    formatting is inherited from NumbaWarning.
    """
class NumbaTypeSafetyWarning(NumbaWarning):
    """
    Warning category for unsafe casting operations.
    Message/location formatting is inherited from NumbaWarning.
    """
class NumbaExperimentalFeatureWarning(NumbaWarning):
    """
    Warning category for using an experimental feature.
    Message/location formatting is inherited from NumbaWarning.
    """
class NumbaInvalidConfigWarning(NumbaWarning):
    """
    Warning category for using an invalid configuration.
    Message/location formatting is inherited from NumbaWarning.
    """
# These are needed in the color formatting of errors setup
class _ColorScheme(metaclass=abc.ABCMeta):
    """
    Abstract interface for terminal color schemes.

    Each method receives a message string; concrete implementations decide
    how (or whether) to decorate it for the role named by the method
    (source code, error message, filename, indicator, highlight, reset).
    """

    @abstractmethod
    def code(self, msg):
        pass

    @abstractmethod
    def errmsg(self, msg):
        pass

    @abstractmethod
    def filename(self, msg):
        pass

    @abstractmethod
    def indicate(self, msg):
        pass

    @abstractmethod
    def highlight(self, msg):
        pass

    @abstractmethod
    def reset(self, msg):
        pass
class _DummyColorScheme(_ColorScheme):
    """
    Do-nothing base implementation of _ColorScheme.

    Accepts (and ignores) a theme argument; every formatting method returns
    None. Concrete schemes (e.g. NOPColorScheme below) override these.
    """

    def __init__(self, theme=None):
        pass

    def code(self, msg):
        pass

    def errmsg(self, msg):
        pass

    def filename(self, msg):
        pass

    def indicate(self, msg):
        pass

    def highlight(self, msg):
        pass

    def reset(self, msg):
        pass
# holds reference to the instance of the terminal color scheme in use
# (created lazily by termcolor() below)
_termcolor_inst = None

try:
    import colorama

    # If the colorama version is < 0.3.9 it can break stdout/stderr in some
    # situations, as a result if this condition is met colorama is disabled and
    # the user is warned. Note that early versions did not have a __version__.
    colorama_version = getattr(colorama, '__version__', '0.0.0')

    if tuple([int(x) for x in colorama_version.split('.')]) < (0, 3, 9):
        msg = ("Insufficiently recent colorama version found. "
               "Numba requires colorama >= 0.3.9")
        # warn the user
        warnings.warn(msg)
        # trip the exception to disable color errors
        raise ImportError

    # If Numba is running in testsuite mode then do not use error message
    # coloring so CI system output is consistently readable without having
    # to read between shell escape characters.
    if os.environ.get('NUMBA_DISABLE_ERROR_MESSAGE_HIGHLIGHTING', None):
        raise ImportError  # just to trigger the exception handler below

except ImportError:
    # colorama is missing, too old, or deliberately disabled: every
    # formatting method degrades to the identity function.

    class NOPColorScheme(_DummyColorScheme):
        """Color scheme that applies no markup at all."""

        def __init__(self, theme=None):
            if theme is not None:
                raise ValueError("specifying a theme has no effect")
            _DummyColorScheme.__init__(self, theme=theme)

        def code(self, msg):
            return msg

        def errmsg(self, msg):
            return msg

        def filename(self, msg):
            return msg

        def indicate(self, msg):
            return msg

        def highlight(self, msg):
            return msg

        def reset(self, msg):
            return msg

    def termcolor():
        """Return the global (no-op) color scheme singleton."""
        global _termcolor_inst
        if _termcolor_inst is None:
            _termcolor_inst = NOPColorScheme()
        return _termcolor_inst

else:
    from colorama import init, reinit, deinit, Fore, Style

    class ColorShell(object):
        """Context manager that (re)initialises colorama around a write."""
        _has_initialized = False

        def __init__(self):
            init()
            self._has_initialized = True

        def __enter__(self):
            if self._has_initialized:
                reinit()

        def __exit__(self, *exc_detail):
            # NOTE(review): this bare attribute access is a no-op —
            # presumably a reset escape was meant to be emitted here; confirm.
            Style.RESET_ALL
            deinit()

    class reset_terminal(object):
        """Context manager yielding a byte buffer; a terminal reset escape
        sequence is appended to the buffer on exit."""

        def __init__(self):
            self._buf = bytearray(b'')

        def __enter__(self):
            return self._buf

        def __exit__(self, *exc_detail):
            self._buf += bytearray(Style.RESET_ALL.encode('utf-8'))

    # define some default themes, if more are added, update the envvars docs!
    themes = {}

    # No color added, just bold weighting
    themes['no_color'] = {'code': None,
                          'errmsg': None,
                          'filename': None,
                          'indicate': None,
                          'highlight': None,
                          'reset': None, }

    # suitable for terminals with a dark background
    themes['dark_bg'] = {'code': Fore.BLUE,
                         'errmsg': Fore.YELLOW,
                         'filename': Fore.WHITE,
                         'indicate': Fore.GREEN,
                         'highlight': Fore.RED,
                         'reset': Style.RESET_ALL, }

    # suitable for terminals with a light background
    themes['light_bg'] = {'code': Fore.BLUE,
                          'errmsg': Fore.BLACK,
                          'filename': Fore.MAGENTA,
                          'indicate': Fore.BLACK,
                          'highlight': Fore.RED,
                          'reset': Style.RESET_ALL, }

    # suitable for terminals with a blue background
    themes['blue_bg'] = {'code': Fore.WHITE,
                         'errmsg': Fore.YELLOW,
                         'filename': Fore.MAGENTA,
                         'indicate': Fore.CYAN,
                         'highlight': Fore.RED,
                         'reset': Style.RESET_ALL, }

    # suitable for use in jupyter notebooks
    themes['jupyter_nb'] = {'code': Fore.BLACK,
                            'errmsg': Fore.BLACK,
                            'filename': Fore.GREEN,
                            'indicate': Fore.CYAN,
                            'highlight': Fore.RED,
                            'reset': Style.RESET_ALL, }

    default_theme = themes['no_color']

    class HighlightColorScheme(_DummyColorScheme):
        """Color scheme that wraps messages in theme-defined ANSI markup."""

        def __init__(self, theme=default_theme):
            self._code = theme['code']
            self._errmsg = theme['errmsg']
            self._filename = theme['filename']
            self._indicate = theme['indicate']
            self._highlight = theme['highlight']
            self._reset = theme['reset']
            _DummyColorScheme.__init__(self, theme=theme)

        def _markup(self, msg, color=None, style=Style.BRIGHT):
            # Concatenate the requested color/style escapes, then emit
            # escapes + message (+ trailing reset, added by reset_terminal)
            # inside an initialised colorama shell.
            features = ''
            if color:
                features += color
            if style:
                features += style
            with ColorShell():
                with reset_terminal() as mu:
                    mu += features.encode('utf-8')
                    mu += (msg).encode('utf-8')
                return mu.decode('utf-8')

        def code(self, msg):
            return self._markup(msg, self._code)

        def errmsg(self, msg):
            return self._markup(msg, self._errmsg)

        def filename(self, msg):
            return self._markup(msg, self._filename)

        def indicate(self, msg):
            return self._markup(msg, self._indicate)

        def highlight(self, msg):
            return self._markup(msg, self._highlight)

        def reset(self, msg):
            return self._markup(msg, self._reset)

    def termcolor():
        """Return the global color scheme singleton, built from the theme
        selected by numba.core.config.COLOR_SCHEME on first use."""
        global _termcolor_inst
        if _termcolor_inst is None:
            scheme = themes[numba.core.config.COLOR_SCHEME]
            _termcolor_inst = HighlightColorScheme(scheme)
        return _termcolor_inst
# Long-form help texts appended to error reports; selected through the
# `error_extras` mapping defined below.

# Generic "please file a bug" footer for internal errors.
feedback_details = """
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
"""

# Shown when compilation hits functionality Numba does not implement.
unsupported_error_info = """
Unsupported functionality was found in the code Numba was trying to compile.
If this functionality is important to you please file a feature request at:
https://github.com/numba/numba/issues/new
"""

# Shown when bytecode interpretation hits unsupported Python constructs.
interpreter_error_info = """
Unsupported Python functionality was found in the code Numba was trying to
compile. This error could be due to invalid code, does the code work
without Numba? (To temporarily disable Numba JIT, set the `NUMBA_DISABLE_JIT`
environment variable to non-zero, and then rerun the code).
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
To see Python/NumPy features supported by the latest release of Numba visit:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
"""

# Shown when constant inference fails on a value expected to be constant.
constant_inference_info = """
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
however please first check that your code is valid for compilation,
particularly with respect to string interpolation (not supported!) and
the requirement of compile time constants as arguments to exceptions:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html?highlight=exceptions#constructs
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
If you think your code should work with Numba. %s
""" % feedback_details

# Appended to type-inference failures.
typing_error_info = """
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
https://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
"""

# Appended to errors that indicate a bug in Numba itself.
reportable_issue_info = """
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
You are currently using Numba version %s.
%s
""" % (numba.__version__, feedback_details)
# Map a short error-category key to the long-form help text that is
# appended to the corresponding error report.
error_extras = {
    'unsupported_error': unsupported_error_info,
    'typing': typing_error_info,
    'reportable': reportable_issue_info,
    'interpreter': interpreter_error_info,
    'constant_inference': constant_inference_info,
}
def deprecated(arg):
    """Define a deprecation decorator.
    An optional string should refer to the new API to be used instead.

    Example:
      @deprecated
      def old_func(): ...

      @deprecated('new_func')
      def old_func(): ..."""
    # When called with a string (@deprecated('new')), remember the
    # replacement API name; when called bare (@deprecated), `arg` is the
    # function itself and there is no replacement to mention.
    replacement = arg if isinstance(arg, str) else None

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            text = 'Call to deprecated function "{}".'
            if replacement:
                text += '\n Use "{}" instead.'
            # stacklevel=2 points the warning at the caller of the
            # deprecated function, not at this wrapper.
            warnings.warn(text.format(func.__name__, replacement),
                          category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper

    # Bare usage: decorate immediately; parameterised usage: return the
    # decorator for the subsequent application.
    return decorator if replacement else decorator(arg)
class WarningsFixer(object):
    """
    An object "fixing" warnings of a given category caught during
    certain phases. The warnings can have their filename and lineno fixed,
    and they are deduplicated as well.
    """

    def __init__(self, category):
        # Only warnings of (a subclass of) this category are collected;
        # others are re-emitted immediately.
        self._category = category
        # {(filename, lineno, category) -> messages}
        self._warnings = defaultdict(set)

    @contextlib.contextmanager
    def catch_warnings(self, filename=None, lineno=None):
        """
        Store warnings and optionally fix their filename and lineno.
        """
        with warnings.catch_warnings(record=True) as wlist:
            warnings.simplefilter('always', self._category)
            yield

        for w in wlist:
            msg = str(w.message)
            if issubclass(w.category, self._category):
                # Store warnings of this category for deduplication
                # NOTE(review): once the first matching warning binds
                # `filename`/`lineno`, later warnings in the same batch
                # reuse those values — presumably intended because a phase
                # maps to a single source location; confirm.
                filename = filename or w.filename
                lineno = lineno or w.lineno
                self._warnings[filename, lineno, w.category].add(msg)
            else:
                # Simply emit other warnings again
                warnings.warn_explicit(msg, w.category,
                                       w.filename, w.lineno)

    def flush(self):
        """
        Emit all stored warnings.
        """
        def key(arg):
            # It is possible through codegen to create entirely identical
            # warnings, this leads to comparing types when sorting which breaks
            # on Python 3. Key as str() and if the worse happens then `id`
            # creates some uniqueness
            return str(arg) + str(id(arg))

        for (filename, lineno, category), messages in sorted(
                self._warnings.items(), key=key):
            for msg in sorted(messages):
                warnings.warn_explicit(msg, category, filename, lineno)
        self._warnings.clear()
class NumbaError(Exception):
    """Base class for Numba errors.

    Stores the raw message and an optional source location; the rendered
    exception text is optionally wrapped in terminal error-highlighting
    markup.
    """

    def __init__(self, msg, loc=None, highlighting=True):
        self.msg = msg
        self.loc = loc
        # Pick the message decorator: the scheme's errmsg markup, or the
        # identity function when highlighting is disabled.
        highlight = termcolor().errmsg if highlighting else (lambda x: x)
        rendered = "%s\n%s\n" % (msg, loc.strformat()) if loc else "%s" % (msg,)
        super(NumbaError, self).__init__(highlight(rendered))

    @property
    def contexts(self):
        # Lazily create the context list on first access.
        if not hasattr(self, '_contexts'):
            self._contexts = []
        return self._contexts

    def add_context(self, msg):
        """
        Add contextual info. The exception message is expanded with the new
        contextual information.
        """
        self.contexts.append(msg)
        template = termcolor().errmsg('{0}\n') + termcolor().filename('During: {1}')
        self.args = (template.format(self, msg),)
        return self

    def patch_message(self, new_message):
        """
        Change the error message to the given new message.
        """
        self.args = (new_message,) + self.args[1:]
class UnsupportedError(NumbaError):
    """
    Numba does not have an implementation for this functionality.
    """
    pass


class UnsupportedRewriteError(UnsupportedError):
    """UnsupportedError from rewrite passes
    """
    pass


class IRError(NumbaError):
    """
    An error occurred during Numba IR generation.
    """
    pass


class RedefinedError(IRError):
    """
    An error occurred during interpretation of IR due to variable redefinition.
    """
    pass


class NotDefinedError(IRError):
    """
    An undefined variable is encountered during interpretation of IR.
    """

    def __init__(self, name, loc=None):
        # `name` is the source-level variable that could not be resolved.
        self.name = name
        msg = ("The compiler failed to analyze the bytecode. "
               "Variable '%s' is not defined." % name)
        super(NotDefinedError, self).__init__(msg, loc=loc)


class VerificationError(IRError):
    """
    An error occurred during IR verification. Once Numba's internal
    representation (IR) is constructed it is then verified to ensure that
    terminators are both present and in the correct places within the IR. If
    it is the case that this condition is not met, a VerificationError is
    raised.
    """
    pass


class DeprecationError(NumbaError):
    """
    Functionality is deprecated.
    """
    pass


class LoweringError(NumbaError):
    """
    An error occurred during lowering.
    """

    def __init__(self, msg, loc=None):
        super(LoweringError, self).__init__(msg, loc=loc)


class UnsupportedParforsError(NumbaError):
    """
    An error occurred because parfors is not supported on the platform.
    """
    pass


class ForbiddenConstruct(LoweringError):
    """
    A forbidden Python construct was encountered (e.g. use of locals()).
    """
    pass
class TypingError(NumbaError):
    """
    A type inference failure.
    """
    pass


class UntypedAttributeError(TypingError):
    """Attribute lookup failed during type inference."""

    def __init__(self, value, attr, loc=None):
        # Special-case attributes of the numpy module so the message hints
        # that the *feature* (not merely the attribute) is unsupported.
        module = getattr(value, 'pymod', None)
        if module is not None and module == np:
            # unsupported numpy feature.
            msg = ("Use of unsupported NumPy function 'numpy.%s' "
                   "or unsupported use of the function.") % attr
        else:
            msg = "Unknown attribute '{attr}' of type {type}"
            msg = msg.format(type=value, attr=attr)
        super(UntypedAttributeError, self).__init__(msg, loc=loc)


class ByteCodeSupportError(NumbaError):
    """
    Failure to extract the bytecode of the user's function.
    """

    def __init__(self, msg, loc=None):
        super(ByteCodeSupportError, self).__init__(msg, loc=loc)


class CompilerError(NumbaError):
    """
    Some high-level error in the compiler.
    """
    pass


class ConstantInferenceError(NumbaError):
    """
    Failure during constant inference.
    """

    def __init__(self, value, loc=None):
        super(ConstantInferenceError, self).__init__(value, loc=loc)


class InternalError(NumbaError):
    """
    For wrapping internal error occurred within the compiler
    """

    def __init__(self, exception):
        super(InternalError, self).__init__(str(exception))
        # Keep the wrapped exception for callers that need to inspect it.
        self.old_exception = exception


class RequireLiteralValue(TypingError):
    """
    For signalling that a function's typing requires a constant value for
    some of its arguments.
    """
    pass
class ForceLiteralArg(NumbaError):
    """A Pseudo-exception to signal the dispatcher to type an argument literally

    Attributes
    ----------
    requested_args : frozenset[int]
        requested positions of the arguments.
    """

    def __init__(self, arg_indices, fold_arguments=None, loc=None):
        """
        Parameters
        ----------
        arg_indices : Sequence[int]
            requested positions of the arguments.
        fold_arguments: callable
            A function ``(tuple, dict) -> tuple`` that binds and flattens
            the ``args`` and ``kwargs``.
        loc : numba.ir.Loc or None
        """
        super(ForceLiteralArg, self).__init__(
            "Pseudo-exception to force literal arguments in the dispatcher",
            loc=loc,
        )
        self.requested_args = frozenset(arg_indices)
        self.fold_arguments = fold_arguments

    def bind_fold_arguments(self, fold_arguments):
        """Bind the fold_arguments function
        """
        # Chain to the current exception so the original context is kept.
        e = ForceLiteralArg(self.requested_args, fold_arguments,
                            loc=self.loc)
        return chain_exception(e, self)

    def combine(self, other):
        """Returns a new instance by or'ing the requested_args.
        """
        if not isinstance(other, ForceLiteralArg):
            m = '*other* must be a {} but got a {} instead'
            raise TypeError(m.format(ForceLiteralArg, type(other)))
        # NOTE(review): the combined instance carries neither
        # `fold_arguments` nor `loc` from either operand — presumably
        # deliberate; confirm against the dispatcher's usage.
        return ForceLiteralArg(self.requested_args | other.requested_args)

    def __or__(self, other):
        """Same as self.combine(other)
        """
        return self.combine(other)
class LiteralTypingError(TypingError):
    """
    Failure in typing a Literal type
    """
    pass
def _format_msg(fmt, args, kwargs):
return fmt.format(*args, **kwargs)
# Absolute directory of the numba package, used to decide whether a source
# location belongs to user code or to numba internals.
_numba_path = os.path.dirname(__file__)
# Contextual kwargs most recently captured by `new_error_context` for
# user-code locations (diagnostic aid).
loc_info = {}
@contextlib.contextmanager
def new_error_context(fmt_, *args, **kwargs):
    """
    A contextmanager that prepend contextual information to any exception
    raised within. If the exception type is not an instance of NumbaError,
    it will be wrapped into a InternalError. The exception class can be
    changed by providing a "errcls_" keyword argument with the exception
    constructor.

    The first argument is a message that describes the context. It can be a
    format string. If there are additional arguments, it will be used as
    ``fmt_.format(*args, **kwargs)`` to produce the final message string.
    """
    errcls = kwargs.pop('errcls_', InternalError)
    loc = kwargs.get('loc', None)
    # Record contextual kwargs only for locations originating in user code
    # (i.e. outside the numba package itself).
    if loc is not None and not loc.filename.startswith(_numba_path):
        loc_info.update(kwargs)
    try:
        yield
    except NumbaError as e:
        # Numba's own errors just gain an extra context line.
        e.add_context(_format_msg(fmt_, args, kwargs))
        raise
    except AssertionError:
        # Let assertion error pass through for shorter traceback in debugging
        raise
    except Exception as e:
        # Wrap anything else; the original traceback is attached only when
        # FULL_TRACEBACKS is enabled.
        newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
        tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
        raise newerr.with_traceback(tb)
# Re-export every public exception/warning type defined in this module
# (extends the __all__ list initialised earlier in the file).
__all__ += [name for (name, value) in globals().items()
            if not name.startswith('_') and isinstance(value, type)
            and issubclass(value, (Exception, Warning))]
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# this script imports dbf-files in a canmatrix-object
# dbf-files are the can-matrix-definitions of the busmaster-project (http://rbei-etas.github.io/busmaster/)
#
from __future__ import absolute_import, division, print_function
import copy
import decimal
import logging
import math
import re
import typing
from builtins import *
import canmatrix
logger = logging.getLogger(__name__)
def default_float_factory(value):  # type: (typing.Any) -> decimal.Decimal
    """Default conversion for numeric text fields: exact decimal.Decimal."""
    return decimal.Decimal(value)
# TODO support for [START_PARAM_NODE_RX_SIG]
# TODO support for [START_PARAM_NODE_TX_MSG]
def decode_define(line):  # type: (str) -> typing.Tuple[str, str, str]
    """Parse one BUSMASTER attribute-definition line.

    *line* has the shape ``"NAME",TYPE,<type-specific payload>``.

    Returns a tuple ``(name, definition, default)`` where *name* has the
    surrounding quotes stripped and *definition* is the canmatrix-style
    definition string (e.g. ``"INT 0 255"``).

    Raises ValueError for an unknown TYPE. (Previously an unknown type fell
    through to the return statement with `my_def`/`default` unbound and
    crashed with an UnboundLocalError.)
    """
    (define, value_type, value) = line.split(',', 2)
    value_type = value_type.strip()
    if value_type == "INT" or value_type == "HEX":
        # payload: min,max,default
        (Min, Max, default) = value.split(',', 2)
        my_def = value_type + ' ' + Min.strip() + ' ' + Max.strip()
        default = default.strip()
    elif value_type == "ENUM":
        # payload: comma-separated enum literals; the last entry is the
        # default value.
        (enums, default) = value.rsplit(',', 1)
        my_def = value_type + " " + enums[1:]
    elif value_type == "STRING":
        my_def = value_type
        default = value
    else:
        # Unknown attribute type: fail loudly with a clear message.
        logger.debug(line)
        raise ValueError(
            "unknown define type %r in line: %r" % (value_type, line))
    return define[1:-1], my_def, default
def load(f, **options):  # type: (typing.IO, **typing.Any) -> canmatrix.CanMatrix
    """Import a BUSMASTER .dbf file from binary stream *f* into a CanMatrix.

    Options:
        dbfImportEncoding: text encoding of the input (default 'iso-8859-1').
        float_factory: callable converting numeric text fields
            (default: default_float_factory, i.e. decimal.Decimal).

    The parser is a line-oriented state machine: ``mode`` names the
    [START_.../END_...] section currently being read; the empty string means
    top level.
    """
    dbf_import_encoding = options.get("dbfImportEncoding", 'iso-8859-1')
    float_factory = options.get('float_factory', default_float_factory)
    is_j1939 = False

    db = canmatrix.CanMatrix()
    mode = ''
    for line in f:
        line = line.decode(dbf_import_encoding).strip()

        if mode == 'SignalDescription':
            if line.startswith(
                    "[END_DESC_SIG]") or line.startswith("[END_DESC]"):
                mode = ''
            else:
                # "<frame id> <S|X> <signal name> <comment>"
                (bo_id, tem_s, signal_name, comment) = line.split(' ', 3)
                comment = comment.replace('"', '').replace(';', '')
                db.frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(bo_id))).signal_by_name(
                    signal_name).add_comment(comment)

        if mode == 'BUDescription':
            if line.startswith(
                    "[END_DESC_NODE]") or line.startswith("[END_DESC]"):
                mode = ''
            else:
                (bu_name, comment) = line.split(' ', 1)
                comment = comment.replace('"', '').replace(';', '')
                db.ecu_by_name(bu_name).add_comment(comment)

        if mode == 'FrameDescription':
            if line.startswith(
                    "[END_DESC_MSG]") or line.startswith("[END_DESC]"):
                mode = ''
            else:
                (bo_id, tem_s, comment) = line.split(' ', 2)
                comment = comment.replace('"', '').replace(';', '')
                frame = db.frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(bo_id)))
                if frame:
                    frame.add_comment(comment)

        elif mode == 'ParamMsgVal':
            if line.startswith("[END_PARAM_MSG_VAL]"):
                mode = ''
            else:
                (bo_id, tem_s, attrib, value) = line.split(',', 3)
                db.frame_by_id(canmatrix.ArbitrationId.from_compound_integer(
                    int(bo_id))).add_attribute(
                    attrib.replace('"', ''),
                    value.replace('"', ''))

        elif mode == 'ParamNodeVal':
            if line.startswith("[END_PARAM_NODE_VAL]"):
                mode = ''
            else:
                (bu, attrib, value) = line.split(',', 2)
                db.ecu_by_name(bu).add_attribute(
                    attrib.replace('"', ''), value[1:-1])

        elif mode == 'ParamNetVal':
            if line.startswith("[END_PARAM_NET_VAL]"):
                mode = ''
            else:
                (attrib, value) = line.split(',', 1)
                db.add_attribute(attrib.replace('"', ''), value[1:-1])

        elif mode == 'ParamSigVal':
            if line.startswith("[END_PARAM_SIG_VAL]"):
                mode = ''
            else:
                (bo_id, tem_s, signal_name, attrib, value) = line.split(',', 4)
                db.frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(bo_id)))\
                    .signal_by_name(signal_name)\
                    .add_attribute(attrib.replace('"', ''), value[1:-1])

        elif mode == 'ParamSig':
            if line.startswith("[END_PARAM_SIG]"):
                mode = ''
            else:
                (name, define, default) = decode_define(line)
                db.add_signal_defines(name, define)
                db.add_define_default(name, default)

        elif mode == 'ParamMsg':
            if line.startswith("[END_PARAM_MSG]"):
                mode = ''
            else:
                (name, define, default) = decode_define(line)
                db.add_frame_defines(name, define)
                db.add_define_default(name, default)

        elif mode == 'ParamNode':
            if line.startswith("[END_PARAM_NODE]"):
                mode = ''
            else:
                (name, define, default) = decode_define(line)
                db.add_ecu_defines(name, define)
                db.add_define_default(name, default)

        elif mode == 'ParamNet':
            if line.startswith("[END_PARAM_NET]"):
                mode = ''
            else:
                (name, define, default) = decode_define(line)
                db.add_global_defines(name, define)
                db.add_define_default(name, default)

        else:
            # Top level: detect section starts and inline definitions.
            if line.startswith("[PROTOCOL]") and "J1939" in line:
                is_j1939 = True
            if line.startswith("[START_DESC_SIG]"):
                mode = 'SignalDescription'
            if line.startswith("[START_DESC_MSG]"):
                mode = 'FrameDescription'
            if line.startswith("[START_DESC_NODE]"):
                mode = 'BUDescription'
            if line.startswith("[START_PARAM_NODE_VAL]"):
                mode = 'ParamNodeVal'
            if line.startswith("[START_PARAM_NET_VAL]"):
                mode = 'ParamNetVal'
            if line.startswith("[START_PARAM_MSG_VAL]"):
                mode = 'ParamMsgVal'
            if line.startswith("[START_PARAM_SIG_VAL]"):
                mode = 'ParamSigVal'
            if line.startswith("[START_PARAM_SIG]"):
                mode = 'ParamSig'
            if line.startswith("[START_PARAM_MSG]"):
                mode = 'ParamMsg'
            if line.startswith("[START_PARAM_NODE]"):
                mode = 'ParamNode'
            if line.startswith("[START_PARAM_NET]"):
                mode = 'ParamNet'

            if line.startswith("[START_MSG]"):
                temp_str = line.strip()[11:].strip()
                temp_array = temp_str.split(',')
                # name, id, size, signal count, dummy
                # [, frame format [, transmitters]]
                (name, arb_id, size, n_signals, dummy) = temp_array[0:5]
                if len(temp_array) > 5:
                    extended = temp_array[5]
                else:
                    extended = None
                if len(temp_array) > 6:
                    transmitters = temp_array[6].split()
                else:
                    transmitters = list()
                new_frame = db.add_frame(
                    canmatrix.Frame(
                        name,
                        size=int(size),
                        transmitters=transmitters))
                if is_j1939:
                    new_frame.arbitration_id.pgn = int(arb_id)
                else:
                    new_frame.arbitration_id = canmatrix.ArbitrationId.from_compound_integer(int(arb_id))
                # Frame(int(Id), name, size, transmitter))
                if extended == 'X':
                    logger.debug("Extended")
                    new_frame.arbitration_id.extended = True

            if line.startswith("[NODE]"):
                temp_str = line.strip()[6:].strip()
                bo_list = temp_str.split(',')
                for bo in bo_list:
                    db.add_ecu(canmatrix.Ecu(bo))

            if line.startswith("[START_SIGNALS]"):
                temp_str = line.strip()[15:].strip()
                temp_array = temp_str.split(',')
                (name, size, start_byte, start_bit, sign, max_val, min_val, byteorder,
                 offset, factor, unit, multiplex) = temp_array[0:12]
                min_val = float_factory(min_val)
                max_val = float_factory(max_val)
                factor = float_factory(factor)
                offset = float_factory(offset)

                if len(temp_array) > 12:
                    # NOTE(review): temp_array items contain no ',' (the line
                    # was already split on ','), so this split always yields
                    # a single-element list — verify multi-receiver parsing.
                    receiver = temp_array[12].split(',')
                else:
                    receiver = []

                if multiplex == 'M':
                    multiplex = 'Multiplexor'
                elif len(multiplex.strip()) > 0:
                    multiplex = int(multiplex[1:])
                else:
                    multiplex = None

                is_float = False
                is_signed = False

                if sign == "U":
                    is_signed = False
                elif sign == "F" or sign == "D":
                    is_float = True
                else:
                    is_signed = True
                # DBF start bits are byte-relative (1-based byte index).
                start_bit = int(start_bit)
                start_bit += (int(start_byte) - 1) * 8

                new_signal = new_frame.add_signal(
                    canmatrix.Signal(
                        name,
                        start_bit=int(start_bit),
                        size=int(size),
                        is_little_endian=(int(byteorder) == 1),
                        is_signed=is_signed,
                        factor=factor,
                        offset=offset,
                        min=min_val * factor,
                        max=max_val * factor,
                        unit=unit,
                        receivers=receiver,
                        is_float=is_float,
                        multiplex=multiplex))

                if int(byteorder) == 0:
                    # this is dummy here, because internal lsb is default - for now
                    new_signal.set_startbit(
                        start_bit, bitNumbering=1, startLittle=True)

            if line.startswith("[VALUE_DESCRIPTION]"):
                temp_str = line.strip()[19:].strip()
                regexp = re.compile("\"(.+)\" *, *(.+)")
                temp = regexp.match(temp_str)

                if temp:
                    # Relies on `new_signal` being set by a preceding
                    # [START_SIGNALS] line in a well-formed file.
                    new_signal.add_values(value=temp.group(2), valueName=temp.group(1))

    for frame in db.frames:
        # receiver is only given in the signals, so do propagate the receiver to the frame:
        frame.update_receiver()
    db.enum_attribs_to_values()
    # Orphan signals are stored in a virtual frame using the id Vector tools
    # reserve for independent signals; unwrap it back into db.signals.
    free_signals_dummy_frame = db.frame_by_name("VECTOR__INDEPENDENT_SIG_MSG")
    if free_signals_dummy_frame is not None and free_signals_dummy_frame.arbitration_id == 0x40000000:
        db.signals = free_signals_dummy_frame.signals
        db.del_frame(free_signals_dummy_frame)
    return db
def dump(mydb, f, **options):
    # type: (canmatrix.CanMatrix, typing.IO, **typing.Any) -> None
    """Export *mydb* as a BUSMASTER .dbf file to binary stream *f*.

    Options:
        dbfExportEncoding: output text encoding (default 'iso-8859-1').
        ignoreEncodingErrors: codec error handler name (default "strict").
    """
    # create copy because export changes database
    db = copy.deepcopy(mydb)
    dbf_export_encoding = options.get("dbfExportEncoding", 'iso-8859-1')
    ignore_encoding_errors = options.get("ignoreEncodingErrors", "strict")
    db.enum_attribs_to_keys()

    if len(db.signals) > 0:
        # Collect orphaned signals in the virtual frame id that Vector tools
        # reserve for independent signals.
        free_signals_dummy_frame = canmatrix.Frame("VECTOR__INDEPENDENT_SIG_MSG")
        free_signals_dummy_frame.arbitration_id = canmatrix.ArbitrationId(id=0x40000000, extended=True)
        free_signals_dummy_frame.signals = db.signals
        db.add_frame(free_signals_dummy_frame)

    out_str = """//******************************BUSMASTER Messages and signals Database ******************************//
[DATABASE_VERSION] 1.3
[PROTOCOL] CAN
[BUSMASTER_VERSION] [3.2.2]
[NUMBER_OF_MESSAGES] """

    out_str += str(len(db.frames)) + "\n"

    # Register attribute defines only when at least one value requires them.
    cycle_times_of_all_frames = [x.cycle_time for x in db.frames]
    if len(cycle_times_of_all_frames) > 0 and max(cycle_times_of_all_frames) > 0:
        db.add_frame_defines("GenMsgCycleTime", 'INT 0 65535')

    cycle_times_of_all_signals = [x.cycle_time for y in db.frames for x in y.signals]
    if len(cycle_times_of_all_signals) > 0 and max(cycle_times_of_all_signals) > 0:
        db.add_signal_defines("GenSigCycleTime", 'INT 0 65535')

    initial_values_of_all_signals = [x.initial_value for y in db.frames for x in y.signals]
    # Bug fix: the original condition was written as
    # `(max(...) > 0 or min(...)) < 0`, which compares the boolean/short-
    # circuit result of `or` against 0 and therefore only fired when
    # max <= 0 and min < 0. Intended: any non-zero start value.
    if len(initial_values_of_all_signals) > 0 and (
            max(initial_values_of_all_signals) > 0
            or min(initial_values_of_all_signals) < 0):
        db.add_signal_defines("GenSigStartValue", 'FLOAT 0 100000000000')

    # Frames
    for frame in db.frames:
        if frame.is_complex_multiplexed:
            logger.error("export complex multiplexers is not supported - ignoring frame " + frame.name)
            continue

        # Name unMsgId m_ucLength m_ucNumOfSignals m_cDataFormat m_cFrameFormat? m_txNode
        # m_cDataFormat Data format: 1-Intel, 0-Motorola.
        # cFrameFormat Standard 'S' Extended 'X'
        extended = 'X' if frame.arbitration_id.extended == 1 else 'S'
        out_str += "[START_MSG] " + frame.name + \
            ",%d,%d,%d,0,%c," % (frame.arbitration_id.id, frame.size, len(frame.signals), extended)
        if not frame.transmitters:
            frame.add_transmitter("Vector__XXX")
        # DBF does not support multiple Transmitters
        out_str += frame.transmitters[0] + "\n"

        for signal in frame.signals:
            # m_acName ucLength m_ucWhichByte m_ucStartBit
            # m_ucDataFormat m_fOffset m_fScaleFactor m_acUnit m_acMultiplex m_rxNode
            # m_ucDataFormat
            which_byte = int(
                math.floor(
                    signal.get_startbit(
                        bit_numbering=1,
                        start_little=True) /
                    8) +
                1)
            # Sign/format character: U unsigned, I signed, F float, D double.
            sign = 'I'
            if not signal.is_signed:
                sign = 'U'
            if signal.is_float:
                if signal.size > 32:
                    sign = 'D'
                else:
                    sign = 'F'
            if signal.factor == 0:
                signal.factor = 1

            out_str += "[START_SIGNALS] " + signal.name \
                + ",%d,%d,%d,%c," % (signal.size,
                                     which_byte,
                                     int(signal.get_startbit(bit_numbering=1,
                                                             start_little=True)) % 8,
                                     sign) + '{:g},{:g}'.format(float(signal.max) / float(signal.factor),
                                                                float(signal.min) / float(signal.factor))
            out_str += ",%d,%s,%s" % (signal.is_little_endian, signal.offset, signal.factor)
            multiplex = ""
            if signal.multiplex is not None:
                if signal.multiplex == 'Multiplexor':
                    multiplex = 'M'
                else:
                    multiplex = 'm' + str(signal.multiplex)
            out_str += "," + signal.unit + ",%s," % multiplex + \
                ','.join(signal.receivers) + "\n"

            if len(signal.values) > 0:
                for value, name in sorted(list(signal.values.items())):
                    out_str += '[VALUE_DESCRIPTION] "' + \
                        name + '",' + str(value) + '\n'

        out_str += "[END_MSG]\n\n"

    # Board units
    out_str += "[NODE] "
    count = 1
    for ecu in db.ecus:
        out_str += ecu.name
        if count < len(db.ecus):
            out_str += ","
        count += 1
    out_str += "\n"

    out_str += "[START_DESC]\n\n"

    # frame descriptions (note: this and the next section label were
    # swapped in the original comments)
    out_str += "[START_DESC_MSG]\n"
    for frame in db.frames:
        if frame.comment is not None:
            comment = frame.comment.replace("\n", " ")
            out_str += str(frame.arbitration_id.id) + ' S "' + comment + '";\n'
    out_str += "[END_DESC_MSG]\n"

    # ECU descriptions
    out_str += "[START_DESC_NODE]\n"
    for ecu in db.ecus:
        if ecu.comment is not None:
            comment = ecu.comment.replace("\n", " ")
            out_str += ecu.name + ' "' + comment + '";\n'
    out_str += "[END_DESC_NODE]\n"

    # signal descriptions
    out_str += "[START_DESC_SIG]\n"
    for frame in db.frames:
        if frame.is_complex_multiplexed:
            continue
        for signal in frame.signals:
            if signal.comment is not None:
                comment = signal.comment.replace("\n", " ")
                out_str += "%d S " % frame.arbitration_id.id + signal.name + ' "' + comment + '";\n'
    out_str += "[END_DESC_SIG]\n"
    out_str += "[END_DESC]\n\n"

    out_str += "[START_PARAM]\n"
    # db-parameter
    out_str += "[START_PARAM_NET]\n"
    for (data_type, define) in sorted(list(db.global_defines.items())):
        default_val = define.defaultValue
        if default_val is None:
            default_val = "0"
        out_str += '"' + data_type + '",' + define.definition.replace(' ', ',') + ',' + default_val + '\n'
    out_str += "[END_PARAM_NET]\n"

    # bu-parameter
    out_str += "[START_PARAM_NODE]\n"
    for (data_type, define) in sorted(list(db.ecu_defines.items())):
        default_val = define.defaultValue
        if default_val is None:
            default_val = "0"
        out_str += '"' + data_type + '",' + define.definition.replace(' ', ',') + ',' + default_val + '\n'
    out_str += "[END_PARAM_NODE]\n"

    # frame-parameter
    out_str += "[START_PARAM_MSG]\n"
    for (data_type, define) in sorted(list(db.frame_defines.items())):
        default_val = define.defaultValue
        if default_val is None:
            default_val = "0"
        out_str += '"' + data_type + '",' + define.definition.replace(' ', ',') + '\n'  # + ',' + default_val + '\n'
    out_str += "[END_PARAM_MSG]\n"

    # signal-parameter
    out_str += "[START_PARAM_SIG]\n"
    for (data_type, define) in list(db.signal_defines.items()):
        default_val = define.defaultValue
        if default_val is None:
            default_val = "0"
        out_str += '"' + data_type + '",' + define.definition.replace(' ', ',') + ',' + default_val + '\n'
    out_str += "[END_PARAM_SIG]\n"

    out_str += "[START_PARAM_VAL]\n"
    # board unit attributes:
    out_str += "[START_PARAM_NODE_VAL]\n"
    for ecu in db.ecus:
        for attrib, val in sorted(list(ecu.attributes.items())):
            out_str += ecu.name + ',"' + attrib + '","' + val + '"\n'
    out_str += "[END_PARAM_NODE_VAL]\n"

    # messages-attributes:
    out_str += "[START_PARAM_MSG_VAL]\n"
    for frame in db.frames:
        if frame.is_complex_multiplexed:
            continue
        for attrib, val in sorted(list(frame.attributes.items())):
            out_str += str(frame.arbitration_id.id) + ',S,"' + attrib + '","' + val + '"\n'
    out_str += "[END_PARAM_MSG_VAL]\n"

    # signal-attributes:
    out_str += "[START_PARAM_SIG_VAL]\n"
    for frame in db.frames:
        if frame.is_complex_multiplexed:
            continue
        for signal in frame.signals:
            for attrib, val in sorted(list(signal.attributes.items())):
                out_str += str(frame.arbitration_id.id) + ',S,' + signal.name + \
                    ',"' + attrib + '","' + val + '"\n'
    out_str += "[END_PARAM_SIG_VAL]\n"
    out_str += "[END_PARAM_VAL]\n"

    f.write(out_str.encode(dbf_export_encoding, ignore_encoding_errors))
| |
import math
from direct.showbase.DirectObject import DirectObject
from direct.interval.LerpInterval import LerpPosInterval
import panda3d.core as p3d
from hud import Hud
from weapon import Weapon
def clamp(value, lower, upper):
    """Return *value* limited to the closed interval [lower, upper]:
    first capped at *upper*, then floored at *lower*."""
    capped = value if value < upper else upper
    return capped if capped > lower else lower
class PlayerController(DirectObject):
    """Input controller for the player character.

    Handles WASD movement, mouse look, sprinting, jumping, firing two
    weapons, and purchasing buildings while in buy mode.  Relies on the
    Panda3D ShowBase globals (``base.win``, ``base.camera``, ``base.render``,
    ``base.camLens``, ``base.messenger``, ``base.mouseWatcherNode``,
    ``base.physics_world``) being initialized before construction.
    """
    # Gameplay / camera tuning constants.
    BUY_DISTANCE = 50          # max ray length when targeting a building to buy
    PLAYER_WALK_SPEED = 20
    PLAYER_SPRINT_SPEED = 50
    CAMERA_DISTANCE = 6        # camera offset behind the pivot (world units)
    CAMERA_HEIGHT = 1.25       # pivot height above the player's origin
    CAMERA_SPRINT_SCALE = 1.2  # camera pulls back by this factor when sprinting
    def __init__(self, player):
        """Set up the HUD, camera rig, weapons, and event bindings for *player*."""
        DirectObject.__init__(self)
        self.player = player
        self.hud = Hud()
        # Center the mouse pointer so the first relative read in update() is zero.
        # NOTE(review): halfx/halfy are floats under Python 3 true division;
        # move_pointer expects integer pixel coordinates -- confirm this works
        # on the targeted Python/Panda3D versions.
        halfx = base.win.get_x_size() / 2
        halfy = base.win.get_y_size() / 2
        base.win.move_pointer(0, halfx, halfy)
        # NOTE(review): x-axis sensitivity is scaled *10 while y-axis is /10;
        # presumably intentional asymmetry -- verify against the config defaults.
        mx_sens_config = p3d.ConfigVariableInt('mousex-sensitivity')
        self.mousex_sensitivity = mx_sens_config.get_value() * 10.0
        my_sens_config = p3d.ConfigVariableInt('mousey-sensitivity')
        self.mousey_sensitivity = my_sens_config.get_value() / 10.0
        # Camera setup: a pivot node carries pitch/heading; the camera hangs
        # behind it at CAMERA_DISTANCE.
        self.camera_pitch = 0
        self.camera_heading = 0
        self.camera_pivot = base.render.attach_new_node('camera_pivot')
        pos = self.camera_pivot.get_pos()
        pos.z += self.CAMERA_HEIGHT
        self.camera_pivot.set_pos(pos)
        base.camera.reparent_to(self.camera_pivot)
        pos = base.camera.get_pos()
        pos.y -= self.CAMERA_DISTANCE
        # Zero-duration interval just snaps the camera to its resting offset;
        # sprint() (currently disabled) re-created it with a real duration.
        self.sprint_camera_interval = LerpPosInterval(
            base.camera,
            0,
            pos)
        self.sprint_camera_interval.start()
        # Accumulated movement intent as a unit-ish vector (see update_movement).
        self.player_movement = p3d.LVector3(0, 0, 0)
        self.player_speed = self.PLAYER_WALK_SPEED
        # Building acquisition
        self.in_buy_mode = False
        self.current_building = None
        # Counts of owned buildings by resource type.
        self.resources = {
            'ALPHA': 0,
            'BETA': 0,
            'GAMMA': 0,
            'EMPTY': 0,
        }
        self.left_weapon = Weapon('rifle')
        self.right_weapon = Weapon('melee')
        # Movement keys send paired press / '-up' release events.
        self.accept('move_forward', self.update_movement, ['forward', True])
        self.accept('move_forward-up', self.update_movement, ['forward', False])
        self.accept('move_backward', self.update_movement, ['backward', True])
        self.accept('move_backward-up', self.update_movement, ['backward', False])
        self.accept('move_left', self.update_movement, ['left', True])
        self.accept('move_left-up', self.update_movement, ['left', False])
        self.accept('move_right', self.update_movement, ['right', True])
        self.accept('move_right-up', self.update_movement, ['right', False])
        self.accept('jump', self.jump)
        self.accept('sprint', self.sprint, [True])
        self.accept('sprint-up', self.sprint, [False])
        self.accept('left_fire', self.fire, [self.left_weapon])
        self.accept('right_fire', self.fire, [self.right_weapon])
        self.accept('purchase', self.purchase_building)
        # Buy mode is active only while the key is held down.
        self.accept('buy_mode', lambda: setattr(self, 'in_buy_mode', True))
        self.accept('buy_mode-up', lambda: setattr(self, 'in_buy_mode', False))
        # DEBUG PC HEALTH
        def damage_health():
            # Debug hook: hurt the player character on demand.
            base.messenger.send('character_hit', [self.player.id])
        self.accept('pc_health', damage_health)
    def destroy(self):
        """Unbind all events and tear down the HUD and player."""
        self.ignoreAll()
        self.hud.destroy()
        self.player.destroy()
    @property
    def is_dead(self):
        """True once the player's hit points are exhausted."""
        return self.player.hp <= 0
    def update_movement(self, direction, activate):
        """Accumulate movement intent for *direction* ('forward'/'backward'/
        'left'/'right'); *activate* False (key release) subtracts the same
        delta, cancelling the press."""
        move_delta = p3d.LVector3(0, 0, 0)
        if direction == 'forward':
            move_delta.set_y(1)
        elif direction == 'backward':
            move_delta.set_y(-1)
        elif direction == 'left':
            move_delta.set_x(-1)
        elif direction == 'right':
            move_delta.set_x(1)
        if not activate:
            move_delta *= -1
        self.player_movement += move_delta
    def jump(self):
        """Jump, but only when standing on the ground."""
        if self.player.is_on_ground():
            self.player.do_jump()
    def sprint(self, activate):
        """Toggle sprint speed; the camera pull-back effect is currently disabled."""
        if activate:
            self.player_speed = self.PLAYER_SPRINT_SPEED
            cam_distance_target = -self.CAMERA_DISTANCE * self.CAMERA_SPRINT_SCALE
        else:
            self.player_speed = self.PLAYER_WALK_SPEED
            cam_distance_target = -self.CAMERA_DISTANCE
        # Move the camera smoothly
        # a = abs(cam_distance_target - base.camera.get_pos().get_y())
        # b = float(self.CAMERA_DISTANCE * self.CAMERA_SPRINT_SCALE) - self.CAMERA_DISTANCE
        # factor = a / b
        # print(base.camera.get_pos().get_y(), cam_distance_target, factor)
        # self.sprint_camera_interval.finish()
        # self.sprint_camera_interval = LerpPosInterval(
        #     base.camera,
        #     0.4 * factor,
        #     p3d.Point3(0, cam_distance_target, self.CAMERA_HEIGHT),
        #     other=self.player.nodepath
        # )
        # self.sprint_camera_interval.start()
    def _get_object_at_cursor(self, distance):
        """Ray-cast from the player toward the screen center and return the
        closest physics node hit within *distance*, or None.

        The ray direction comes from extruding the screen center through the
        camera lens; the origin is the player's position raised by half the
        player's half-height so shots clear low ground geometry.
        """
        from_point = p3d.Point3()
        to_point = p3d.Point3()
        far_point = p3d.Point3()
        base.camLens.extrude(p3d.Point2(0, 0), from_point, to_point)
        from_point = self.player.get_pos()
        from_point.z += self.player.half_height * 0.5
        to_point = base.render.get_relative_point(base.camera, to_point)
        # Rebase the lens direction onto the player origin and scale to range.
        to_point -= from_point
        to_point.normalize()
        to_point *= distance
        to_point += from_point
        result = base.physics_world.ray_test_closest(from_point, to_point)
        # Draw debug lines
        #lineseg = p3d.LineSegs('debug ray')
        #lineseg.reset()
        #lineseg.move_to(from_point)
        #lineseg.draw_to(to_point)
        #debug_line = lineseg.create(False)
        #base.render.attach_new_node(debug_line)
        if result.has_hit():
            return result.get_node()
        else:
            return None
    def fire(self, weapon):
        """Fire *weapon* at whatever is under the cursor; hits on nodes tagged
        with a 'character_id' broadcast a 'character_hit' event."""
        node = self._get_object_at_cursor(weapon.range)
        if (node and node.get_python_tag('character_id')):
            cid = node.get_python_tag('character_id')
            base.messenger.send('character_hit', [cid])
    def purchase_building(self):
        """Buy the currently highlighted building while in buy mode: mark it
        player-owned, tint it, and credit its resource."""
        if self.in_buy_mode and self.current_building:
            print('BUYING', self.current_building.resource)
            self.current_building.owner = 'PLAYER'
            self.current_building.nodepath.set_color_scale(0, 0, 10, 1)
            self.resources[self.current_building.resource] += 1
            self.current_building = None
    def update(self, task):
        """Per-frame task: apply movement, mouse look, camera follow, and
        buy-mode building highlighting.  Returns ``task.cont`` to keep running."""
        # Update movement
        # Transform the movement intent into camera space, flatten it to the
        # ground plane, and scale by the current speed.
        movement = p3d.LVector3(self.player_movement)
        movement.normalize()
        movement = base.camera.getMat(base.render).xformVec(movement)
        movement[2] = 0
        movement.normalize()
        movement *= self.player_speed
        self.player.set_linear_movement(movement, local=False)
        # if movement.length() != 0.0:
        #     desired_heading = -math.degrees(math.atan2(movement.x, movement.y))
        # Turn the player toward the camera's facing, easing 25% per frame
        # along the shortest angular path (alpha is wrapped into [-180, 180)).
        cam_vec = base.camera.getMat(base.render).xformVec(p3d.LVector3f(0, 1, 0))
        desired_heading = -math.degrees(math.atan2(cam_vec.x, cam_vec.y))
        current_heading = self.player.nodepath.get_h()
        alpha = ((desired_heading-current_heading) + 180) % 360 - 180
        heading = current_heading + alpha * 0.25
        self.player.nodepath.set_h(heading)
        # Mouse movement
        if base.mouseWatcherNode.has_mouse():
            mouse = base.mouseWatcherNode.get_mouse()
            # Re-center the pointer every frame so `mouse` is a per-frame delta.
            halfx = base.win.get_x_size() / 2
            halfy = base.win.get_y_size() / 2
            base.win.move_pointer(0, halfx, halfy)
            #self.player.set_angular_movement(-mouse.x * self.mousex_sensitivity)
            self.camera_pitch += mouse.y * self.mousey_sensitivity
            self.camera_pitch = clamp(self.camera_pitch, -75, 75)
            self.camera_heading += -mouse.x * self.mousex_sensitivity * 0.025
        # Update the camera
        # The pivot eases toward the player's head position (25% per frame).
        self.camera_pivot.set_p(self.camera_pitch)
        self.camera_pivot.set_h(self.camera_heading)
        pos = self.player.nodepath.get_pos()
        pos[2] += self.CAMERA_HEIGHT
        pos_delta = pos - self.camera_pivot.get_pos()
        self.camera_pivot.set_pos(self.camera_pivot.get_pos() + pos_delta * 0.25)
        # Highlight buildings when in buy_mode:
        if self.in_buy_mode:
            result = self._get_object_at_cursor(self.BUY_DISTANCE)
            building = None
            if result and result.get_python_tag('building'):
                building = result.get_python_tag('building')
            if building and not building.owner and building != self.current_building:
                # New unowned building under the cursor: move the highlight.
                if self.current_building:
                    self.current_building.nodepath.clear_color_scale()
                self.current_building = building
                self.current_building.nodepath.set_color_scale(2, 0, 0, 1)
                print("BUY {}?".format(building.resource))
            elif self.current_building:
                # Cursor left the building (or it is owned): clear the highlight.
                self.current_building.nodepath.clear_color_scale()
                self.current_building = None
        self.hud.update(self)
        return task.cont
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for all_reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.distribute import all_reduce as ar
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class AllReduceTest(test_util.TensorFlowTestCase):
  """Tests for the ring, shuffle, and recursive halving-doubling all-reduce
  builders in tensorflow.python.distribute.all_reduce."""

  def testFlattenTensorsShapesDefined(self):
    """_flatten_tensors must reject tensors without statically known shapes."""
    x = array_ops.placeholder(types_pb2.DT_FLOAT, [None])
    with self.assertRaisesRegexp(ValueError,
                                 "must have statically known shape"):
      ar._flatten_tensors([x, x])
  def testRingPermutations(self):
    """Check predecessor/rank tables produced by _ring_permutations for
    various (num_workers, num_subchunks, gpu_perm) combinations."""
    # 0 devices
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, [])
    self.assertEqual(pred_by_c_d, [])
    self.assertEqual(rank_by_c_d, [])
    # 1 worker, 1 subchunk cases
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
    self.assertEqual(pred_by_c_d, [[0]])
    self.assertEqual(rank_by_c_d, [[0]])
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2])
    self.assertEqual(pred_by_c_d, [[2, 0, 1]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2]])
    # multiple workers, 1 subchunk cases
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2])
    self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]])
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2])
    self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]])
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0])
    self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]])
    self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]])
    # 1 worker, multiple subchunk cases
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
    self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]])
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3])
    self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2],
                                   [3, 0, 1, 2], [3, 0, 1, 2]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2],
                                   [2, 3, 0, 1], [1, 2, 3, 0]])
    # multiple worker, multiple subchunk cases
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3])
    self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6],
                                   [3, 0, 5, 2, 7, 4, 1, 6]])
    self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7],
                                   [2, 3, 0, 1, 6, 7, 4, 5]])
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1])
    self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4],
                                   [1, 2, 7, 0, 5, 6, 3, 4]])
    self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5],
                                   [2, 1, 0, 3, 6, 5, 4, 7]])
  def _buildInput(self, num_workers, num_gpus):
    """Return (input_tensors, device_names): one identical 16-element float
    constant placed on each of num_workers * num_gpus GPU devices."""
    t8 = constant_op.constant(
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
        types_pb2.DT_FLOAT)
    input_tensors = []
    device_names = []
    for w in range(0, num_workers):
      for d in range(0, num_gpus):
        dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
        device_names.append(dn)
        with ops.device(dn):
          input_tensors.append(array_ops.identity(t8))
    return input_tensors, device_names
  def testBuildRingGatherPassStructure(self):
    """Verify the tensor structure (not values) emitted by _build_ring_gather."""
    # 1 worker, 1 device
    input_tensors, device_names = self._buildInput(1, 1)
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
    output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
                                           pred_by_c_d, rank_by_c_d,
                                           math_ops.add)
    # Single device: the gather pass is a no-op.
    self.assertEqual(output_tensors, input_tensors)
    # 1 worker, 4 devices, 2 subchunks
    input_tensors, device_names = self._buildInput(1, 4)
    pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
    output_tensors, pad_len = ar._build_ring_gather(
        input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
    self.assertEqual(0, pad_len)
    # same number outputs as inputs
    self.assertEqual(len(output_tensors), len(input_tensors))
    # Each output is a list of num_chunks slices of equal length.
    num_chunks = 2 * len(input_tensors)
    tlen = tensor_shape.dimension_value(input_tensors[0].shape[0])
    for otl in output_tensors:
      self.assertEqual(len(otl), num_chunks)
      for ot in otl:
        self.assertEqual(ot.shape, [tlen/num_chunks])
  def _buildInitialVars(self, shape, dev_list):
    """Create one variable per device, initialized so element i on device d
    holds i + 0.01*d (distinct values make reduction errors visible)."""
    values = []
    num_devices = len(dev_list)
    dim = np.prod(shape) if shape else 1
    for d in range(0, num_devices):
      with ops.device(dev_list[d]):
        npt = np.zeros(shape).astype(np.float32)
        # alias shares npt's buffer, so writing through it fills npt in place.
        alias = np.frombuffer(npt.data, dtype=np.float32)
        for i in range(0, dim):
          alias[i] = i + 0.01 * d
        var = state_ops.variable_op(shape, types_pb2.DT_FLOAT)
        state_ops.init_variable(var, npt).op.run()
        values.append(var)
    return values
  # pylint: disable=g-long-lambda
  def _buildRing(self, num_workers, num_gpus, subdiv):
    """Return a build function f(tensors, un_op) for a ring all-reduce."""
    gpu_perm = range(0, num_gpus)
    return lambda x, un_op: ar.build_ring_all_reduce(
        x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
  def _testAllReduce(self, num_workers, num_gpus, shape, build_f):
    """Run build_f on per-device variables and check the reduced sum matches
    a simple add_n over the same inputs."""
    # Use local CPU as device for all inputs.
    num_devices = num_workers * num_gpus
    dev_list = ["/replica:0/task:0/device:CPU:0"
                for _ in range(num_devices)]
    with self.cached_session():
      input_tensors = self._buildInitialVars(shape, dev_list)
      # un_op divides by device count, so each all-reduce output equals the
      # mean; summing the outputs reproduces the plain add_n sum.
      un_op = lambda x: math_ops.div(
          x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))
      simple_sum = math_ops.add_n(input_tensors)
      simple_sum.op.run()
      output_tensors = build_f(input_tensors, un_op)
      sum_reduced = math_ops.add_n(output_tensors)
      sum_reduced.op.run()
      self.assertAllClose(sum_reduced.eval(), self.evaluate(simple_sum))
  def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
    """Exercise one ring all-reduce configuration and log its wall time."""
    start_time = time.time()
    build_f = self._buildRing(num_workers, num_gpus, subdiv)
    self._testAllReduce(num_workers, num_gpus, shape, build_f)
    elapsed = time.time() - start_time
    tf_logging.info("RingAllReduce num_workers=%d num_gpus=%d shape=%s "
                    "subdiv=%d elapsed=%f" %
                    (num_workers, num_gpus, shape, subdiv, elapsed))
  def testRingAllReduce(self):
    """Ring all-reduce across a range of worker/GPU/shape/subdiv combos."""
    self._testRingAllReduce(1, 2, [], 1)
    self._testRingAllReduce(1, 2, [8], 1)
    self._testRingAllReduce(1, 2, [4, 4], 1)
    self._testRingAllReduce(6, 1, [8], 1)
    self._testRingAllReduce(1, 8, [32], 1)
    self._testRingAllReduce(1, 8, [120], 1)
    self._testRingAllReduce(2, 8, [7, 13], 1)
    self._testRingAllReduce(2, 8, [8, 8], 2)
    self._testRingAllReduce(2, 8, [8, 8], 4)
    # TODO(tucker): The following test is surprisingly slow.
    # Diagnose and fix before re-enabling.
    # self._testRingAllReduce(4, 8, [8, 8, 2], 4)
  def _buildShuffle(self, num_workers, num_gpus, num_shards):
    """Return a build function f(tensors, un_op) for a shuffle all-reduce."""
    # Use local CPU for all shuffle shards
    gather_devices = ["/replica:0/task:0/device:CPU:0"
                      for _ in range(num_shards)]
    return lambda x, un_op: ar.build_shuffle_all_reduce(
        x, gather_devices, math_ops.add_n, un_op)
  def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):
    """Exercise one shuffle all-reduce configuration and log its wall time."""
    start_time = time.time()
    build_f = self._buildShuffle(num_workers, num_gpus, num_shards)
    self._testAllReduce(num_workers, num_gpus, shape, build_f)
    elapsed = time.time() - start_time
    tf_logging.info("ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s "
                    "elapsed=%f" % (num_workers, num_gpus, shape, elapsed))
  def testShuffleAllReduce(self):
    """Shuffle all-reduce across a range of worker/GPU/shape/shard combos."""
    self._testShuffleAllReduce(1, 2, [], 1)
    self._testShuffleAllReduce(1, 2, [8], 1)
    self._testShuffleAllReduce(1, 2, [4, 4], 1)
    self._testShuffleAllReduce(1, 8, [32], 1)
    self._testShuffleAllReduce(1, 8, [120], 1)
    self._testShuffleAllReduce(2, 8, [7, 13], 3)
    self._testShuffleAllReduce(2, 8, [8, 8], 2)
    self._testShuffleAllReduce(2, 8, [8, 8], 4)
    self._testShuffleAllReduce(4, 8, [8, 8, 2], 4)
  def _buildRecursiveHD(self, num_workers, num_gpus):
    """Return a build function f(tensors, un_op) for recursive halving-doubling."""
    return lambda x, un_op: ar.build_recursive_hd_all_reduce(
        x, math_ops.add, un_op)
  # pylint: enable=g-long-lambda
  def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):
    """Exercise one recursive-HD all-reduce configuration and log wall time."""
    start_time = time.time()
    build_f = self._buildRecursiveHD(num_workers, num_gpus)
    self._testAllReduce(num_workers, num_gpus, shape, build_f)
    elapsed = time.time() - start_time
    tf_logging.info("RecursiveHDAllReduce num_workers=%d num_gpus=%d "
                    "shape=%s elapsed=%f" %
                    (num_workers, num_gpus, shape, elapsed))
  def testRecursiveHDAllReduce(self):
    """Recursive-HD all-reduce across power-of-two device counts."""
    self._testRecursiveHDAllReduce(1, 2, [8])
    self._testRecursiveHDAllReduce(1, 2, [4, 4])
    self._testRecursiveHDAllReduce(1, 8, [32])
    self._testRecursiveHDAllReduce(1, 8, [120])
    self._testRecursiveHDAllReduce(2, 8, [8, 8])
    self._testRecursiveHDAllReduce(4, 8, [8, 8, 2])
# Standard TensorFlow test entry point: discover and run this module's tests.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import measurereport
from .fhirdate import FHIRDate
class MeasureReportTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("MeasureReport", js["resourceType"])
return measurereport.MeasureReport(js)
def testMeasureReport1(self):
inst = self.instantiate_from("measurereport-cms146-cat1-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MeasureReport instance")
self.implMeasureReport1(inst)
js = inst.as_json()
self.assertEqual("MeasureReport", js["resourceType"])
inst2 = measurereport.MeasureReport(js)
self.implMeasureReport1(inst2)
    def implMeasureReport1(self, inst):
        """Field-by-field assertions for the CMS146 category-1 (individual)
        example report.  Generated from the example JSON; each population list
        is ordered initial-population / numerator / denominator /
        denominator-exclusion."""
        self.assertEqual(inst.contained[0].id, "reporter")
        self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
        self.assertEqual(inst.date.as_json(), "2014-04-01")
        self.assertEqual(inst.group[0].id, "CMS146-group-1")
        self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].population[0].count, 1)
        self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].population[1].count, 1)
        self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].population[2].count, 1)
        self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].population[3].count, 0)
        # Age stratifier: up to 9 years.
        self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
        # Age stratifier: 10 and older.
        self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
        # Gender stratifier: male / female / other / unknown strata.
        self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
        # Report-level metadata.
        self.assertEqual(inst.id, "measurereport-cms146-cat1-example")
        self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat1-example-2017-03-13")
        self.assertEqual(inst.measure, "Measure/CMS146")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
        self.assertEqual(inst.period.end.as_json(), "2014-03-31")
        self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
        self.assertEqual(inst.period.start.as_json(), "2014-01-01")
        self.assertEqual(inst.status, "complete")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type, "individual")
def testMeasureReport2(self):
inst = self.instantiate_from("measurereport-cms146-cat2-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MeasureReport instance")
self.implMeasureReport2(inst)
js = inst.as_json()
self.assertEqual("MeasureReport", js["resourceType"])
inst2 = measurereport.MeasureReport(js)
self.implMeasureReport2(inst2)
    def implMeasureReport2(self, inst):
        """Field-by-field assertions for the CMS146 category-2 (subject-list)
        example report.  Generated from the example JSON; each population list
        is ordered initial-population / numerator / denominator /
        denominator-exclusion."""
        self.assertEqual(inst.contained[0].id, "reporter")
        self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
        self.assertEqual(inst.date.as_json(), "2014-04-01")
        self.assertEqual(inst.group[0].id, "CMS146-group-1")
        self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].population[0].count, 500)
        self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].population[1].count, 200)
        self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].population[2].count, 500)
        self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].population[3].count, 100)
        # Age stratifier: up to 9 years.
        self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
        # Age stratifier: 10 and older.
        self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
        # Gender stratifier: male / female / other / unknown strata.
        self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
        # NOTE(review): "denominator-exclusions" (plural) below differs from the
        # "denominator-exclusion" code used everywhere else in this file -- the
        # generated test presumably mirrors the example JSON; confirm against
        # the source data before "fixing" it.
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusions")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
        # Report-level metadata.
        self.assertEqual(inst.id, "measurereport-cms146-cat2-example")
        self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat2-example-2017-03-13")
        self.assertEqual(inst.measure, "Measure/CMS146")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
        self.assertEqual(inst.period.end.as_json(), "2014-03-31")
        self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
        self.assertEqual(inst.period.start.as_json(), "2014-01-01")
        self.assertEqual(inst.status, "complete")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type, "subject-list")
def testMeasureReport3(self):
    """Round-trip the cat3 example through JSON, validating both copies."""
    instance = self.instantiate_from("measurereport-cms146-cat3-example.json")
    self.assertIsNotNone(instance, "Must have instantiated a MeasureReport instance")
    self.implMeasureReport3(instance)
    as_json = instance.as_json()
    self.assertEqual("MeasureReport", as_json["resourceType"])
    # Rebuild a second instance from the serialized form and re-validate it.
    self.implMeasureReport3(measurereport.MeasureReport(as_json))
def implMeasureReport3(self, inst):
    """Assert every expected field value of the cat3 (summary) MeasureReport example."""
    self.assertEqual(inst.contained[0].id, "reporter")
    self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
    self.assertEqual(inst.date.as_json(), "2014-04-01")
    # Top-level group populations.
    self.assertEqual(inst.group[0].id, "CMS146-group-1")
    self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].population[0].count, 500)
    self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].population[1].count, 200)
    self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].population[2].count, 500)
    self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].population[3].count, 100)
    # Stratifier 0: ages up to 9.
    self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
    # Stratifier 1: ages 10 and over.
    self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
    # Stratifier 2: gender (male / female / other / unknown).
    self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 250)
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 100)
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 250)
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 50)
    self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusion")
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
    self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
    # Resource-level metadata.
    self.assertEqual(inst.id, "measurereport-cms146-cat3-example")
    self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat3-example-2017-03-13")
    self.assertEqual(inst.measure, "Measure/CMS146")
    self.assertEqual(inst.meta.tag[0].code, "HTEST")
    self.assertEqual(inst.meta.tag[0].display, "test health data")
    self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
    self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
    self.assertEqual(inst.period.end.as_json(), "2014-03-31")
    self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
    self.assertEqual(inst.period.start.as_json(), "2014-01-01")
    self.assertEqual(inst.status, "complete")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.type, "summary")
| |
""" Author: Cole Howard
Title: Finnegan
An extinsible neural net designed to explore Convolutional Neural Networks and
Recurrent Neural Networks via extensive visualizations.
"""
import numpy as np
from sklearn.preprocessing import normalize
from layer import Layer
# from matplotlib import cm
# from matplotlib import pyplot as plt
# import warnings
# warnings.filterwarnings("ignore", category=DeprecationWarning)
class Network:
    """ A multi layer neural net with backpropagation.

    Parameters
    ----------
    layers : int
        Number of layers to use in the network.
    neuron_count : list
        A list of integers that represent the number of neurons present in
        each hidden layer. (Size of input/output layers are dictated by
        the dataset.)
    vector : list
        Example vector to get size of initial input

    Attributes
    ----------
    possible : list
        A list of possible output values (the digit classes 0-9).
    """

    def __init__(self, layers, neuron_count, vector):
        self.num_layers = layers
        self.neuron_count = neuron_count
        self.possible = list(range(10))
        # First layer is sized from the sample input vector; every later
        # layer is sized from the neuron count of the layer before it.
        self.layers = [Layer(self.neuron_count[x], self.neuron_count[x - 1])
                       if x > 0 else Layer(self.neuron_count[x], len(vector))
                       for x in range(self.num_layers)]

    def _pass_through_net(self, vector, dropout=True):
        """ Sends a vector into the net.

        Parameters
        ----------
        vector : numpy array
            A numpy array representing a training input (without the target)
        dropout : bool
            Whether or not to perform random dropout in the pass through
            the net. (Set False for the testing set vectors.)

        Returns
        -------
        numpy array
            Output of the last layer in the chain
        """
        # Feed the output of each layer forward as the input of the next.
        for layer in self.layers:
            vector = layer._vector_pass(vector, dropout)
        return vector

    def _softmax(self, w, t=1.0):
        """ Calculate the softmax of a list of numbers ``w``.

        Author: Jeremy M. Stober, edits by Martin Thoma
        Program: softmax.py
        Date: Wednesday, February 29 2012 and July 31 2014

        Parameters
        ----------
        w : list of numbers
        t : float
            Temperature; accepted for API compatibility but not currently
            applied to the computation.

        Returns
        -------
        numpy array
            Non-negative numbers of the same length as ``w`` summing to 1.

        Examples (calling via an instance)::

            _softmax([0.1, 0.2])  -> array([ 0.47502081,  0.52497919])
            _softmax([-0.1, 0.2]) -> array([ 0.42555748,  0.57444252])
            _softmax([0.9, -10])  -> array([ 9.99981542e-01, 1.84578933e-05])
            _softmax([0, 10])     -> array([ 4.53978687e-05, 9.99954602e-01])
        """
        # Subtract the max for numerical stability before exponentiating.
        e_x = np.exp(w - np.max(w))
        out = e_x / e_x.sum()
        return out

    def _backprop(self, guess_vector, target_vector):
        """ Takes the output of the net and initiates the backpropagation.

        In output layer:
            generate error matrix [(out * (1-out) * (Target-out)) for each
            neuron] and update weights matrix [[+= l_rate * error_entry *
            input TO that amount] for each neuron].
        In hidden layer:
            generate error matrix [out * (1-out) * dotproduct(entry in n+1
            error matrix, n+1 weight of that entry)] and update weights
            matrix [[+= l_rate for each weight] for each neuron].

        Parameters
        ----------
        guess_vector : numpy array
            The output from the last layer during a training pass
        target_vector : list
            List of expected values

        Returns
        -------
        True
            As evidence of execution
        """
        # Walk the layers back-to-front; the output layer (i == 0) has no
        # downstream layer, every other layer propagates from the one ahead.
        backwards_layer_list = list(reversed(self.layers))
        for i, layer in enumerate(backwards_layer_list):
            hidden = i != 0
            layer_ahead = backwards_layer_list[i - 1] if hidden else None
            if not layer._layer_level_backprop(guess_vector, layer_ahead,
                                               target_vector, hidden):
                print("Backprop failed on layer: " + str(i))
        # Apply the accumulated weight deltas once all errors are computed.
        for layer in self.layers:
            layer._update_weights()
        return True

    def train(self, dataset, answers, epochs):
        """ Runs the training dataset through the network a given number of
        times.

        Parameters
        ----------
        dataset : Numpy nested array
            The collection of training data (vectors and the associated
            target value)
        answers : numpy array
            The array of correct answers to associate with each training
            vector
        epochs : int
            Number of times to run the training set through the net
        """
        for epoch in range(epochs):
            for vector, target in zip(dataset, answers):
                # One-hot encode the target digit.
                target_vector = [0 if x != target else 1 for x in self.possible]
                vector = np.array(vector).reshape(1, -1).astype(float)
                vector = normalize(vector, copy=False)[0]
                output = self._pass_through_net(vector)
                guess = self._softmax(output)
                self._backprop(guess, target_vector)
            # Mean absolute error of the output layer; stop early once the
            # net has essentially converged.
            amt_off = np.mean(np.abs(self.layers[self.num_layers - 1].error))
            print(amt_off)
            if amt_off < .000000001:
                break

    def run_unseen(self, test_set):
        """ Makes guesses on the unseen data.

        Each vector is normalized and passed through the net without
        dropout; the index of the strongest output neuron is taken as the
        guess for that vector.

        Parameters
        ----------
        test_set : list
            List of numpy arrays representing the unseen vectors

        Returns
        -------
        list
            a list of ints (the guesses for each vector)
        """
        guess_list = []
        for vector in test_set:
            vector = np.array(vector).reshape(1, -1).astype(float)
            temp = self._pass_through_net(normalize(vector, copy=False)[0],
                                          dropout=False)
            guess_list.append(temp.argmax())
        return guess_list

    def report_results(self, guess_list, answers):
        """ Reports results of guesses on unseen set.

        Parameters
        ----------
        guess_list : list
        answers : list
        """
        # Guard: avoid ZeroDivisionError on an empty guess list.
        if not guess_list:
            print("No guesses to report.")
            return
        successes = sum(1 for idx, item in enumerate(guess_list)
                        if answers[idx] == item)
        print(guess_list)
        print("Successes: {} Out of total: {}".format(successes,
                                                      len(guess_list)))
        print("For a success rate of: ", successes / len(guess_list))

    def visualization(self, vector, vector_name):
        """ Display the vector as a 28x28 grayscale image titled with
        *vector_name* (assumes MNIST-style 784-element input).
        """
        # Imported locally: the module-level matplotlib imports are
        # commented out, so without this the method raised NameError.
        from matplotlib import cm
        from matplotlib import pyplot as plt
        y = np.reshape(vector, (28, 28))
        plt.imshow(y, cmap=cm.Greys_r)
        plt.suptitle(vector_name)
        plt.axis('off')
        plt.pause(0.0001)
        plt.show()
# Entry-point guard: this module is a library; training runs are launched
# from net_launch.py instead.
if __name__ == '__main__':
    print("Please use net_launch.py")
| |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import functools
from contextlib import contextmanager
from yosaipy2.core.utils.utils import get_logger
from yosaipy2.core import (
SessionStorageEvaluator,
LazySettings,
SecurityManagerSettings,
SerializationManager,
SessionException,
ThreadStateManager,
UnauthenticatedException,
subject_abcs,
)
class SubjectContext(subject_abcs.SubjectContext):
    """
    A SubjectContext assists a SecurityManager and SubjectFactory with the
    configuration of new Subject instances.  Each ``resolve_*`` method hunts
    for its datum heuristically, exhausting every available resource
    (explicit attribute, subject, session) before giving up.

    Most Yosai users never instantiate a SubjectContext directly; a
    SubjectBuilder uses one internally to build Subject instances.
    """

    def __init__(self, yosai, security_manager):
        self.yosai = yosai
        self.security_manager = security_manager
        self.account_id = None
        self.authentication_token = None
        self.authenticated = None
        self.identifiers = None
        self.host = None
        self.session = None
        self.session_id = None
        self.session_creation_enabled = True
        self.subject = None
        self._logger = get_logger()

    def resolve_security_manager(self):
        """Return the context's SecurityManager, falling back to Yosai's."""
        manager = self.security_manager
        if manager is not None:
            return manager
        self._logger.debug(
            "No SecurityManager available in subject context. "
            "Falling back to Yosai.security_manager for lookup.")
        try:
            manager = self.yosai.security_manager
        except AttributeError:
            self._logger.debug(
                "SubjectContext.resolve_security_manager cannot "
                "obtain security_manager! No SecurityManager available "
                "via Yosai. Heuristics exhausted.",
                exc_info=True)
        return manager

    def resolve_identifiers(self, session):
        """Locate identifiers from the context, the subject, or the session."""
        # account_id is a SimpleIdentifierCollection:
        identifiers = self.identifiers or self.account_id
        if not identifiers:
            identifiers = getattr(self.subject, 'identifiers', None)
        # otherwise, use the session key as the identifier:
        if not identifiers:
            try:
                identifiers = session.get_internal_attribute(
                    'identifiers_session_key')
            except AttributeError:
                identifiers = None
        return identifiers

    def resolve_session(self):
        """Return an existing session, asking the subject for one if needed."""
        if self.session is not None:
            return self.session
        try:
            return self.subject.get_session(False)
        except AttributeError:
            return None

    def resolve_authenticated(self, session):
        """Return True when a successful authentication can be established."""
        authc = self.authenticated
        if authc is None:
            # presence of an account_id indicates a successful
            # authentication attempt:
            authc = getattr(self, 'account_id', None)
        if authc is None:
            # fall back to a session check:
            try:
                authc = session.get_internal_attribute(
                    'authenticated_session_key')
            except AttributeError:
                authc = None
        return bool(authc)

    def resolve_host(self, session):
        """Resolve the host from the context, the token, or the session."""
        host = self.host
        if host is None:
            # an AuthenticationToken may carry the host it came from:
            host = getattr(self.authentication_token, 'host', None)
        if host is None:
            host = getattr(session, 'host', None)
        return host

    def __repr__(self):
        return "{0}(subject={1})".format(self.__class__.__name__, self.subject)
class DelegatingSubject(subject_abcs.Subject):
    """
    A ``DelegatingSubject`` delegates method calls to an underlying
    ``SecurityManager`` instance for security checks.  It is essentially a
    ``SecurityManager`` proxy, just as ``DelegatingSession`` is to
    ``NativeSessionManager``.

    This implementation does not maintain security-related state such as
    roles and permissions.  Instead, it asks the underlying SecurityManager
    to check authorization.  However, Subject-specific state, such as
    username, is saved.  Furthermore, if you are using the
    WebDelegatingSubject derivative, the WebRegistry object is saved.

    A common misconception in using this implementation is that an EIS
    resource (RDBMS, etc) would be 'hit' every time a method is called.
    This is not necessarily the case and is up to the implementation of the
    underlying SecurityManager instance.  If caching of authorization data
    is desired (to eliminate EIS round trips and therefore improve database
    performance), it is considered much more elegant to let the underlying
    SecurityManager implementation or its delegate components manage
    caching, not this class.  A ``SecurityManager`` is considered a
    business-tier component, where caching strategies are better managed.

    Run-As
    --------
    Yosai includes 'Run-As' functionality.  A Run-As scenario is one where
    a user, such as an Admin or Developer, assumes the identity of another
    user so that the Admin/Developer may experience Yosai as the target
    user would (as if the target had logged in).  This helps w/ customer
    support, debugging, etc.

    Concurrency
    -------------
    Shiro uses multithreading.  Yosai's approach to concurrency will be
    decided once CPU and IO statistics have been collected from the
    synchronous version.  Until then, the ported multithreading-related
    methods remain commented out.

    :type authenticated: bool
    """

    def __init__(self,
                 identifiers=None,
                 remembered=False,
                 authenticated=False,
                 host=None,
                 session=None,
                 session_creation_enabled=True,
                 security_manager=None):
        self._identifiers = None
        self.security_manager = security_manager
        self.identifiers = identifiers  # goes through the validating setter
        self.remembered = remembered
        self.authenticated = authenticated
        self.host = host
        if session is not None:
            # register so the subject forgets the session once it stops:
            session.stop_session_callback = self.session_stopped
            self.session = session
        else:
            self.session = None
        self.session_creation_enabled = session_creation_enabled
        self.run_as_identifiers_session_key = 'run_as_identifiers_session_key'
        self._logger = get_logger()

    # this is a placeholder for subclasses, which use more elaborate checking:
    def is_session_creation_enabled(self):
        """Return whether a session may be created for this subject."""
        return self.session_creation_enabled

    # new to yosai.core.
    # security_manager is required for certain operations
    def check_security_manager(self):
        """Raise ValueError when no SecurityManager has been set."""
        if self.security_manager is None:
            msg = "DelegatingSubject requires that a SecurityManager be set"
            raise ValueError(msg)

    @property
    def has_identifiers(self):
        # True when identifiers (run-as or real) are available
        return bool(self.identifiers)

    @property
    def primary_identifier(self):
        try:
            return self.identifiers.primary_identifier
        except AttributeError:
            # narrowed from a bare ``except``: only the "identifiers is
            # None / lacks primary_identifier" case should yield None
            return None

    @property
    def identifiers(self):
        """The effective identifiers: the top of the run-as stack when one
        exists, otherwise this subject's own identifiers."""
        # expecting a List of IdentifierCollection objects:
        run_as_identifiers = self.get_run_as_identifiers_stack()
        if not run_as_identifiers:
            return self._identifiers
        else:
            return run_as_identifiers[-1]

    @identifiers.setter
    def identifiers(self, identifiers):
        """
        :type identifiers: subject_abcs.IdentifierCollection
        """
        if isinstance(identifiers, subject_abcs.IdentifierCollection) or (identifiers is None):
            self._identifiers = identifiers
        else:
            raise ValueError('must use IdentifierCollection')

    def is_permitted(self, permission_s):
        """
        :param permission_s: a collection of 1..N permissions
        :type permission_s: List of authz_abcs.Permission object(s) or String(s)

        :returns: a List of tuple(s), containing the authz_abcs.Permission and a
                  Boolean indicating whether the permission is granted
        :raises ValueError: when the subject is neither authenticated nor
                            remembered
        """
        if self.authorized:
            self.check_security_manager()
            return (self.security_manager.is_permitted(
                self.identifiers, permission_s))
        msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
        raise ValueError(msg)

    # refactored is_permitted_all:
    def is_permitted_collective(self, permission_s, logical_operator=all):
        """
        :param permission_s: a List of authz_abcs.Permission objects
        :param logical_operator: indicates whether *all* or at least one
                                 permission check is true, *any*
        :type: any OR all (functions from python stdlib)

        :returns: a Boolean
        :raises ValueError: when the subject is neither authenticated nor
                            remembered
        """
        sm = self.security_manager
        if self.authorized:
            return sm.is_permitted_collective(self.identifiers,
                                              permission_s,
                                              logical_operator)
        msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
        raise ValueError(msg)

    def assert_authz_check_possible(self):
        """Raise UnauthenticatedException when no identifiers are available."""
        if not self.identifiers:
            msg = (
                "This subject is anonymous - it does not have any " +
                "identification and authorization operations " +
                "required an identity to check against. A Subject " +
                "instance will acquire these identifying identifier " +
                "automatically after a successful login is performed be " +
                "executing " + self.__class__.__name__ +
                ".login(Account) or when 'Remember Me' " +
                "functionality is enabled by the SecurityManager. " +
                "This exception can also occur when a previously " +
                "logged-in Subject has logged out which makes it " +
                "anonymous again. Because an identity is currently not " +
                "known due to any of these conditions, authorization is " +
                "denied.")
            raise UnauthenticatedException(msg)

    def check_permission(self, permission_s, logical_operator=all):
        """
        :param permission_s: a collection of 1..N permissions
        :type permission_s: List of authz_abcs.Permission objects or Strings
        :param logical_operator: indicates whether all or at least one
                                 permission check is true (any)
        :type: any OR all (from python stdlib)
        :raises UnauthorizedException: if any permission is unauthorized
        """
        self.assert_authz_check_possible()
        if self.authorized:
            self.security_manager.check_permission(self.identifiers,
                                                   permission_s,
                                                   logical_operator)
        else:
            msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
            raise ValueError(msg)

    def has_role(self, role_s):
        """
        :param role_s: 1..N role identifiers (strings)
        :type role_s: Set of Strings

        :returns: a set of tuple(s), containing the role and a Boolean
                  indicating whether the user is a member of the Role
        :raises ValueError: when the subject is neither authenticated nor
                            remembered
        """
        if self.authorized:
            return self.security_manager.has_role(self.identifiers, role_s)
        msg = 'Cannot check permission when identifiers aren\'t set!'
        raise ValueError(msg)

    # refactored has_all_roles:
    def has_role_collective(self, role_s, logical_operator=all):
        """
        :param role_s: 1..N role identifier
        :type role_s: a Set of Strings
        :param logical_operator: indicates whether all or at least one
                                 permission check is true (any)
        :type: any OR all (from python standard library)

        :returns: a Boolean
        :raises ValueError: when the subject is neither authenticated nor
                            remembered
        """
        if self.authorized:
            return self.security_manager.has_role_collective(self.identifiers,
                                                             role_s,
                                                             logical_operator)
        else:
            msg = 'Cannot check permission when identifiers aren\'t set!'
            raise ValueError(msg)

    def check_role(self, role_ids, logical_operator=all):
        """
        :param role_ids: 1 or more RoleIds
        :type role_ids: a Set of Strings
        :param logical_operator: indicates whether all or at least one
                                 permission check is true (any)
        :type: any OR all (from python stdlib)
        :raises UnauthorizedException: if Subject not assigned to all roles
        """
        if self.authorized:
            self.security_manager.check_role(self.identifiers,
                                             role_ids,
                                             logical_operator)
        else:
            msg = 'Cannot check permission when identifiers aren\'t set!'
            raise ValueError(msg)

    def login(self, authc_token):
        """
        :type authc_token: authc_abcs.AuthenticationToken

        authc_token's password is cleartext that is stored as a bytearray.
        The authc_token password is cleared in memory, within the authc_token,
        when authentication is successful.
        """
        self.clear_run_as_identities_internal()
        # login raises an AuthenticationException if it fails to authenticate:
        subject = self.security_manager.login(subject=self,
                                              authc_token=authc_token)
        host = None
        if isinstance(subject, DelegatingSubject):
            # directly reference the attributes in case there are assumed
            # identities (Run-As) -- we don't want to lose the 'real' identifiers
            identifiers = subject._identifiers
            host = subject.host
        else:
            identifiers = subject.identifiers  # use the property accessor
        if not identifiers:
            msg = ("Identifiers returned from security_manager.login(authc_token" +
                   ") returned None or empty value. This value must be" +
                   " non-None and populated with one or more elements.")
            raise ValueError(msg)
        self._identifiers = identifiers
        self.authenticated = True
        if not host:
            try:
                host = authc_token.host
            except AttributeError:  # likely not using a HostAuthenticationToken
                host = None
        self.host = host
        session = subject.get_session(False)
        if session:
            session.stop_session_callback = self.session_stopped
            self.session = session
        else:
            self.session = None

    @property
    def authorized(self):
        # authz checks are allowed for remembered OR authenticated subjects
        return self.remembered or self.authenticated

    def get_session(self, create=True):
        """Return this subject's session, optionally creating one.

        :type create: bool
        """
        msg = ("{0} attempting to get session; create = {1}; 'session is None' ="
               "{2} ; 'session has id' = {3}").format(
            self.__class__.__name__, create, (self.session is None),
            str(self.session is not None and bool(self.session.session_id)))
        self._logger.debug(msg)
        if self.session and not create:  # touching a new session is redundant
            self.session.touch()  # this is used to reset the idle timer (new to yosai)
            return self.session
        if not self.session and create:
            if not self.is_session_creation_enabled():
                msg = ("Session creation is disabled for the current subject. "
                       "This exception indicates that there is "
                       "either a programming error (using a session when "
                       "it should never be used) or that Yosai's "
                       "configuration needs to be adjusted to allow "
                       "Sessions to be created for the current Subject.")
                raise ValueError(msg)
            # bug fix: the original built msg as a *tuple*
            # ("...", str(self.host)), logging a tuple instead of a string
            msg = "Starting session for host " + str(self.host)
            self._logger.debug(msg)
            session_context = self.create_session_context()
            session = self.security_manager.start(session_context)
            session.stop_session_callback = self.session_stopped
            self.session = session
        return self.session

    def create_session_context(self):
        """Return the context dict used to start a new session."""
        session_context = {'host': self.host}
        return session_context

    def clear_run_as_identities_internal(self):
        """Clear run-as identities, swallowing session errors during logout."""
        try:
            self.clear_run_as_identities()
        except SessionException:
            msg = ("clearrunasidentitiesinternal: Encountered session "
                   "exception trying to clear 'runAs' identities during "
                   "logout. This can generally safely be ignored.")
            self._logger.debug(msg, exc_info=True)

    def logout(self):
        """Log out via the SecurityManager, then always reset local state."""
        try:
            self.clear_run_as_identities_internal()
            self.security_manager.logout(self)
        finally:
            self.session = None
            self._identifiers = None
            self.authenticated = False

    def session_stopped(self):
        # callback invoked by the session when it stops
        self.session = None

    def run_as(self, identifiers):
        """Assume the given identity (requires an existing identity).

        :type identifiers: subject_abcs.IdentifierCollection
        """
        if not self.has_identifiers:
            msg = ("This subject does not yet have an identity. Assuming the "
                   "identity of another Subject is only allowed for Subjects "
                   "with an existing identity. Try logging this subject in "
                   "first, or using the DelegatingSubject.Builder "
                   "to build ad hoc Subject instances with identities as "
                   "necessary.")
            raise ValueError(msg)
        self.push_identity(identifiers)

    @property
    def is_run_as(self):
        return bool(self.get_run_as_identifiers_stack())

    def get_previous_identifiers(self):
        """
        :returns: SimpleIdentifierCollection
        """
        previous_identifiers = None
        stack = self.get_run_as_identifiers_stack()  # TBD: must confirm logic
        if stack:
            if len(stack) == 1:
                previous_identifiers = self.identifiers
            else:
                # always get the one behind the current
                # NOTE(review): stack[1] is the second element from the
                # bottom, while the current identity is stack[-1]; confirm
                # this matches the intended "one behind current" semantics
                previous_identifiers = stack[1]
        return previous_identifiers

    def release_run_as(self):
        """Drop the most recently assumed identity and return it."""
        return self.pop_identity()

    def get_run_as_identifiers_stack(self):
        """
        :returns: an IdentifierCollection
        """
        session = self.get_session(False)
        try:
            return session.get_internal_attribute(self.run_as_identifiers_session_key)
        except AttributeError:
            return None

    def clear_run_as_identities(self):
        """Remove the run-as stack from the session, if any."""
        session = self.get_session(False)
        if session is not None:
            session.remove_internal_attribute(
                self.run_as_identifiers_session_key)

    def push_identity(self, identifiers):
        """
        :type identifiers: subject_abcs.IdentifierCollection
        :raises ValueError: when identifiers is None or empty
        """
        if not identifiers:
            msg = ("Specified Subject identifiers cannot be None or empty "
                   "for 'run as' functionality.")
            raise ValueError(msg)
        stack = self.get_run_as_identifiers_stack()
        if not stack:
            stack = []
        stack.append(identifiers)
        session = self.get_session()
        session.set_internal_attribute(self.run_as_identifiers_session_key, stack)

    def pop_identity(self):
        """
        :returns: SimpleIdentifierCollection
        """
        popped = None
        stack = self.get_run_as_identifiers_stack()
        if stack:
            popped = stack.pop()
            if stack:
                # persist the changed stack to the session
                session = self.get_session()
                session.set_internal_attribute(self.run_as_identifiers_session_key, stack)
            else:
                # stack is empty, remove it from the session:
                self.clear_run_as_identities()
        return popped

    def __repr__(self):
        return "{0}(_identifiers={1}, authenticated={2})". \
            format(self.__class__.__name__, self._identifiers, self.authenticated)
# migrated from /mgt:
class SubjectStore(object):
    """
    This is known as /mgt/SubjectDAO in Shiro.
    This is the default ``SubjectStore`` implementation for storing ``Subject`` state.
    The default behavior is to save ``Subject`` state into the Subject's ``Session``.
    Note that the storing of the ``Subject`` state into the ``Session`` is considered
    a default behavior of Yosai but this behavior can be disabled -- see below.
    Once a Subject's state is stored in a Session, a ``Subject`` instance can be
    re-created at a later time by first acquiring the Subject's session. A
    Subject's session is typically acquired through interaction with a
    SessionManager, referencing a ``Session`` by session_id or
    session_key, and then instantiating/building a Subject instance using
    Session attributes.
    Controlling How Sessions are Used
    ---------------------------------
    Whether a Subject's ``Session`` is used to persist the Subject's state is
    controlled on a per-Subject basis.
    Disabling Session Persistence Entirely
    --------------------------------------
    You can disable Session usage for Subject state entirely by setting
    the Subject Store's session_storage_enabled attribute:
    session_store.session_storage_evaluator.session_storage_enabled = False
    or, for example, when initializing the SecurityManager:::
    SecurityManager.subject_store.session_storage_evaluator.session_storage_enabled = False
    However, Note: ONLY do this if your application is 100% stateless and you
    *DO NOT* need subjects to be remembered across remote invocations, or in a web
    environment across HTTP requests.
    Supporting Both Stateful and Stateless Subject paradigms
    --------------------------------------------------------
    Perhaps your application needs to support a hybrid approach of both
    stateful and stateless Subjects:
    - Stateful: Stateful subjects might represent web end-users that need
      their identity and authentication state to be remembered from page to
      page.
    - Stateless: Stateless subjects might represent API clients (e.g. REST
      clients) that authenticate on every request, and therefore don't need
      authentication state to be stored across requests in a session.
    """
    def __init__(self, ss_evaluator=None):
        """
        :param ss_evaluator: decides whether a Subject's state may be stored
                             in its Session; a fresh SessionStorageEvaluator
                             is created when omitted
        """
        # The evaluator default is created lazily, per instance.  A default
        # argument of ``SessionStorageEvaluator()`` would be evaluated once
        # at class-definition time and shared by every SubjectStore, so
        # configuring one store would silently affect all others.
        if ss_evaluator is None:
            ss_evaluator = SessionStorageEvaluator()
        self.session_storage_evaluator = ss_evaluator
        # session keys under which identifiers and authc state are persisted:
        self.dsc_isk = 'identifiers_session_key'
        self.dsc_ask = 'authenticated_session_key'
        self._logger = get_logger()
    def is_session_storage_enabled(self, subject):
        """
        Determines whether the subject's ``Session`` will be used to persist
        subject state.  This default implementation merely delegates to the
        internal ``SessionStorageEvaluator``.
        :type subject: subject_abcs.Subject
        """
        return self.session_storage_evaluator. \
            is_session_storage_enabled(subject)
    def save(self, subject):
        """
        Saves the subject's state to the subject's ``Session`` only
        if session storage is enabled for the subject.  If session storage is
        not enabled for the specific Subject, this method does nothing.
        In either case, the argument Subject is returned directly (a new
        ``Subject`` instance is not created).
        :param subject: the Subject instance for which its state will be
                        created or updated
        :type subject: DelegatingSubject
        :returns: the same Subject passed in (a new Subject instance is
                  not created)
        """
        if self.is_session_storage_enabled(subject):
            self.merge_identity(subject)
        else:
            msg = ("Session storage of subject state for Subject [{0}] has "
                   "been disabled: identity and authentication state are "
                   "expected to be initialized on every request or "
                   "invocation.".format(subject))
            self._logger.debug(msg)
        return subject
    # yosai consolidates merge_principals and merge_authentication_state
    def merge_identity(self, subject):
        """
        Merges the Subject's identifying attributes (principals) and authc
        status into the Subject's session.
        :type subject: DelegatingSubject
        """
        current_identifiers = subject.identifiers
        session = subject.get_session(False)
        if not session:
            if not (current_identifiers or subject.authenticated):
                # nothing worth persisting and no session to update -> no-op
                # (previously this path dereferenced the non-existent session
                # below, raising AttributeError on None)
                return
            session = subject.get_session()
            to_set = [[self.dsc_isk, current_identifiers],
                      [self.dsc_ask, True]]
            msg = ('merge_identity _DID NOT_ find a session for current subject '
                   'and so created a new one (session_id: {0}). Now merging '
                   'internal attributes: {1}'.format(session.session_id, to_set))
            self._logger.debug(msg)
            session.set_internal_attributes(to_set)
        else:
            self.merge_identity_with_session(current_identifiers, subject, session)
    def merge_identity_with_session(self, current_identifiers, subject, session):
        """
        Reconcile the subject's current identifiers / authc state with what is
        already stored in the session, updating or removing attributes only
        when they differ (avoids needless session writes).
        """
        msg = 'merge_identity _DID_ find a session for current subject.'
        self._logger.debug(msg)
        to_remove = []
        to_set = []
        internal_attributes = session.get_internal_attributes()
        existing_identifiers = internal_attributes.get(self.dsc_isk)
        if not current_identifiers:
            if existing_identifiers:
                to_remove.append(self.dsc_isk)
            # otherwise both are null or empty - no need to update session
        else:
            if not (current_identifiers == existing_identifiers):
                to_set.append([self.dsc_isk, current_identifiers])
            # otherwise they're the same - no need to update the session
        existing_authc = internal_attributes.get(self.dsc_ask)
        if subject.authenticated:
            if existing_authc is None:  # either doesnt exist or set None
                to_set.append([self.dsc_ask, True])
            # otherwise authc state matches - no need to update the session
        else:
            if existing_authc is not None:
                # existing doesn't match the current state - remove it:
                to_remove.append(self.dsc_ask)
            # otherwise not in the session and not authenticated and
            # no need to update the session
        if to_set:
            session.set_internal_attributes(to_set)
        if to_remove:
            session.remove_internal_attributes(to_remove)
    def delete(self, subject):
        """
        Remove any persisted identity / authc state from the subject's
        session, if one exists.
        :type subject: subject_abcs.Subject
        """
        session = subject.get_session(False)
        if session:
            session.remove_internal_attribute(self.dsc_ask)
            session.remove_internal_attribute(self.dsc_isk)
# moved from its own yosai module so as to avoid circular importing:
class Yosai(object):
    """
    Top-level API object: loads settings, builds a SecurityManager, and
    exposes the context manager and decorators through which applications
    obtain and guard access to Subjects.
    """
    # class-level logger shared by all Yosai instances
    _logger = get_logger()
    def __init__(self, env_var=None, file_path=None, session_attributes=None):
        """
        :param env_var: name of an environment variable that points at a
                        yosai settings file (alternative to file_path)
        :param file_path: path of a yosai settings file
        :type session_attributes: tuple
        """
        # you can configure LazySettings in one of two ways: env or file_path
        self.settings = LazySettings(env_var=env_var, file_path=file_path)
        self.security_manager = \
            self.generate_security_manager(self.settings, session_attributes)
    def generate_security_manager(self, settings, session_attributes):
        """Build and return the SecurityManager described by ``settings``."""
        # don't forget to pass default_cipher_key into the WebSecurityManager
        mgr_builder = SecurityManagerCreator()
        return mgr_builder.create_manager(self, settings, session_attributes)
    def _get_subject(self):
        """
        Returns the currently accessible Subject available to the calling code
        depending on runtime environment.
        :returns: the Subject currently accessible to the calling code
        """
        subject_context = SubjectContext(yosai=self, security_manager=self.security_manager)
        subject = self.security_manager.create_subject(subject_context=subject_context)
        # record the new subject on the global (thread-local) stack
        global_subject_context.stack.append(subject)
        return subject
    @staticmethod
    @contextmanager
    def context(yosai):
        """
        Make ``yosai`` the current Yosai instance for the duration of the
        ``with`` block; both global context stacks are cleared on exit,
        even when an exception propagates.
        """
        global_yosai_context.stack.append(yosai)
        try:
            yield
        except:  # noqa: E722 -- re-raised unchanged; present so finally runs
            raise
        finally:
            # drop ALL stacked yosai/subject state, not just this entry
            global_yosai_context.stack = []
            global_subject_context.stack = []
    @classmethod
    def get_current_subject(cls):
        """
        Return the Subject at the top of the global context stack, creating
        one from the current Yosai instance when none exists yet.
        """
        try:
            subject = global_subject_context.stack[-1]
            msg = ('A subject instance DOES exist in the global context. '
                   'Touching and then returning it.')
            cls._logger.debug(msg)
            # keep the subject's session alive while it is being used
            subject.get_session().touch()
            return subject
        except IndexError:
            msg = 'A subject instance _DOES NOT_ exist in the global context. Creating one.'
            cls._logger.debug(msg)
            subject = Yosai.get_current_yosai()._get_subject()
            # NOTE(review): _get_subject already appends the new subject to
            # the stack, so this append appears to leave a duplicate entry --
            # confirm whether that is intentional
            global_subject_context.stack.append(subject)
            return subject
    @staticmethod
    def get_current_yosai():
        """
        :returns: the Yosai instance at the top of the global context stack
        :raises IndexError: when no Yosai instance is in the global context
        """
        try:
            return global_yosai_context.stack[-1]
        except IndexError:
            msg = 'A yosai instance does not exist in the global context.'
            raise IndexError(msg)
    @staticmethod
    def requires_authentication(fn):
        """
        Requires that the calling Subject be authenticated before allowing access.
        :raises UnauthenticatedException: indicating that the decorated method is
                                          not allowed to be executed because the
                                          Subject failed to authenticate
        """
        @functools.wraps(fn)
        def wrap(*args, **kwargs):
            subject = Yosai.get_current_subject()
            if not subject.authenticated:
                msg = "The current Subject is not authenticated. ACCESS DENIED."
                raise UnauthenticatedException(msg)
            return fn(*args, **kwargs)
        return wrap
    @staticmethod
    def requires_user(fn):
        """
        Requires that the calling Subject be *either* authenticated *or* remembered
        via RememberMe services before allowing access.
        This method essentially ensures that subject.identifiers IS NOT None
        :raises UnauthenticatedException: indicating that the decorated method is
                                          not allowed to be executed because the
                                          Subject attempted to perform a user-only
                                          operation
        """
        @functools.wraps(fn)
        def wrap(*args, **kwargs):
            subject = Yosai.get_current_subject()
            if subject.identifiers is None:
                msg = ("Attempting to perform a user-only operation. The "
                       "current Subject is NOT a user (they haven't been "
                       "authenticated or remembered from a previous login). "
                       "ACCESS DENIED.")
                raise UnauthenticatedException(msg)
            return fn(*args, **kwargs)
        return wrap
    @staticmethod
    def requires_guest(fn):
        """
        Requires that the calling Subject be NOT (yet) recognized in the system as
        a user -- the Subject is not yet authenticated nor remembered through
        RememberMe services.
        This method essentially ensures that subject.identifiers IS None
        :raises UnauthenticatedException: indicating that the decorated method is
                                          not allowed to be executed because the
                                          Subject attempted to perform a guest-only
                                          operation
        """
        @functools.wraps(fn)
        def wrap(*args, **kwargs):
            subject = Yosai.get_current_subject()
            if subject.identifiers is not None:
                msg = ("Attempting to perform a guest-only operation. The "
                       "current Subject is NOT a guest (they have either been "
                       "authenticated or remembered from a previous login). "
                       "ACCESS DENIED.")
                raise UnauthenticatedException(msg)
            return fn(*args, **kwargs)
        return wrap
    @staticmethod
    def requires_permission(permission_s, logical_operator=all):
        """
        Requires that the calling Subject be authorized to the extent that is
        required to satisfy the permission_s specified and the logical operation
        upon them.
        :param permission_s: the permission(s) required
        :type permission_s: a List of Strings or List of Permission instances
        :param logical_operator: indicates whether all or at least one permission
                                 is true (and, any)
        :type: and OR all (from python standard library)
        :raises AuthorizationException: if the user does not have sufficient
                                        permission
        Elaborate Example:
            requires_permission(
                permission_s=['domain1:action1,action2', 'domain2:action1'],
                logical_operator=any)
        Basic Example:
            requires_permission(['domain1:action1,action2'])
        """
        def outer_wrap(fn):
            @functools.wraps(fn)
            def inner_wrap(*args, **kwargs):
                subject = Yosai.get_current_subject()
                subject.check_permission(permission_s, logical_operator)
                return fn(*args, **kwargs)
            return inner_wrap
        return outer_wrap
    @staticmethod
    def requires_dynamic_permission(permission_s, logical_operator=all):
        """
        This method requires that the calling Subject be authorized to the extent
        that is required to satisfy the dynamic permission_s specified and the logical
        operation upon them.  Unlike ``requires_permission``, which uses statically
        defined permissions, this function derives a permission from arguments
        specified at declaration.
        Dynamic permissioning requires that the dynamic arguments be keyword
        arguments of the decorated method.
        :param permission_s: the permission(s) required
        :type permission_s: a List of Strings or List of Permission instances
        :param logical_operator: indicates whether all or at least one permission
                                 is true (and, any)
        :type: and OR all (from python standard library)
        :raises AuthorizationException: if the user does not have sufficient
                                        permission
        Elaborate Example:
            requires_permission(
                permission_s=['{kwarg1.domainid}:action1,action2',
                              '{kwarg2.domainid}:action1'],
                logical_operator=any)
        Basic Example:
            requires_permission(['{kwarg.domainid}:action1,action2'])
        """
        def outer_wrap(fn):
            @functools.wraps(fn)
            def inner_wrap(*args, **kwargs):
                # substitute the wrapped call's kwargs into the templates
                newperms = [perm.format(**kwargs) for perm in permission_s]
                subject = Yosai.get_current_subject()
                subject.check_permission(newperms, logical_operator)
                return fn(*args, **kwargs)
            return inner_wrap
        return outer_wrap
    @staticmethod
    def requires_role(role_s, logical_operator=all):
        """
        Requires that the calling Subject be authorized to the extent that is
        required to satisfy the role_s specified and the logical operation
        upon them.
        :param role_s: a collection of the role(s) required, specified by
                       identifiers (such as a role name)
        :type role_s: a List of Strings
        :param logical_operator: indicates whether all or at least one permission
                                 is true (and, any)
        :type: and OR all (from python standard library)
        :raises AuthorizationException: if the user does not have sufficient
                                        role membership
        Elaborate Example:
            requires_role(role_s=['sysadmin', 'developer'], logical_operator=any)
        Basic Example:
            requires_role('physician')
        """
        def outer_wrap(fn):
            @functools.wraps(fn)
            def inner_wrap(*args, **kwargs):
                subject = Yosai.get_current_subject()
                subject.check_role(role_s, logical_operator)
                return fn(*args, **kwargs)
            return inner_wrap
        return outer_wrap
    def __eq__(self, other):
        # two Yosai instances are considered equal when they share a
        # security manager.
        # NOTE(review): no matching __hash__/__ne__ is defined -- confirm
        # instances are never used as dict keys or set members
        return self.security_manager == other.security_manager
class SecurityManagerCreator(object):
    """
    Assembles a SecurityManager instance (realms, cache handler, session
    attributes) from declarative settings.
    """
    @staticmethod
    def _init_realms(settings, realms):
        """
        Instantiate the configured realms, wiring each realm to its account
        store and verifiers.

        :param realms: iterable of (realm_cls, account_store_cls, verifiers)
        :returns: tuple of realm instances
        :raises AttributeError or TypeError: re-raised with a setup-failure
            message, explicitly chained to the original exception
        """
        try:
            return tuple(realm(account_store=account_store(settings=settings), **verifiers)
                         for realm, account_store, verifiers in realms)
        except (AttributeError, TypeError) as exc:
            msg = 'Failed to initialize realms during SecurityManager Setup'
            # chain to the original exception so the root cause isn't lost:
            raise exc.__class__(msg) from exc
    @staticmethod
    def _init_cache_handler(settings, cache_handler, serialization_manager):
        """
        Instantiate the configured cache handler, or return None when no
        usable cache handler class was configured.
        """
        try:
            return cache_handler(settings=settings,
                                 serialization_manager=serialization_manager)
        except TypeError:
            # cache_handler is None (or not callable with these kwargs)
            return None
    @staticmethod
    def _init_session_attributes(session_attributes, attributes):
        """
        Resolve session attributes, preferring those passed explicitly over
        those declared in settings; returns None when neither is available.
        """
        if session_attributes:
            return session_attributes
        try:
            # falsy settings values are normalized to None (matches the
            # previous fall-through behavior)
            return attributes['session_attributes'] or None
        except (TypeError, KeyError):
            return None
    def create_manager(self, yosai, settings, session_attributes):
        """
        Build the SecurityManager declared in ``settings``.

        Order of execution matters:  the sac must be set before the
        cache_handler is instantiated so that the cache_handler's
        serialization manager instance registers the sac.
        """
        mgr_settings = SecurityManagerSettings(settings)
        attributes = mgr_settings.attributes
        realms = self._init_realms(settings, attributes['realms'])
        session_attributes = self._init_session_attributes(session_attributes, attributes)
        serialization_manager = SerializationManager()
        # the cache_handler doesn't initialize a cache_realm until it gets
        # a serialization manager, which is assigned within the SecurityManager
        cache_handler = self._init_cache_handler(settings,
                                                 attributes['cache_handler'],
                                                 serialization_manager)
        manager = mgr_settings.security_manager(yosai,
                                                settings,
                                                realms=realms,
                                                cache_handler=cache_handler,
                                                serialization_manager=serialization_manager)
        return manager
# Set Global State Managers
# Module-level, per-thread stacks used by Yosai.context, get_current_yosai
# and get_current_subject to track the "current" yosai / subject.
global_yosai_context = ThreadStateManager()
global_subject_context = ThreadStateManager()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import json
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from py4j.protocol import Py4JError
from pyspark import since
from pyspark.rdd import RDD, _prepare_for_python_RDD, ignore_unicode_prefix
from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
from pyspark.sql.types import Row, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.utils import install_exception_handler
from pyspark.sql.functions import UserDefinedFunction
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
def _monkey_patch_RDD(sqlContext):
    """Attach a ``toDF`` convenience method to :class:`RDD`, bound to ``sqlContext``."""
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`
        This is a shorthand for ``sqlContext.createDataFrame(rdd, schema, sampleRatio)``
        :param schema: a StructType or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame
        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sqlContext.createDataFrame(self, schema, sampleRatio)
    RDD.toDF = toDF
class SQLContext(object):
"""Main entry point for Spark SQL functionality.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, sqlContext=None):
        """Creates a new SQLContext.

        :param sparkContext: The :class:`SparkContext` backing this SQLContext.
        :param sqlContext: An optional JVM Scala SQLContext; when provided it is
                           wrapped instead of instantiating a new JVM context.
        >>> from datetime import datetime
        >>> sqlContext = SQLContext(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.registerTempTable("allTypes")
        >>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...     'from allTypes where b and i > 0').collect()
        [Row(_c0=2, _c1=2.0, _c2=False, _c3=2, _c4=0, \
            time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        # may be None; _ssql_ctx creates the JVM context lazily in that case
        self._scala_SQLContext = sqlContext
        _monkey_patch_RDD(self)
        install_exception_handler()
        # remember the first context created so getOrCreate can reuse it
        if SQLContext._instantiatedContext is None:
            SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
if self._scala_SQLContext is None:
self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
return self._scala_SQLContext
    @classmethod
    @since(1.6)
    def getOrCreate(cls, sc):
        """
        Get the existing SQLContext or create a new one with given SparkContext.
        :param sc: SparkContext
        """
        # only one context is tracked per python process; calling cls()
        # registers the new instance as _instantiatedContext (see __init__)
        if cls._instantiatedContext is None:
            jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
            cls(sc, jsqlContext)
        return cls._instantiatedContext
    @since(1.6)
    def newSession(self):
        """
        Returns a new SQLContext as new session, that has separate SQLConf,
        registered temporary tables and UDFs, but shared SparkContext and
        table cache.
        """
        jsqlContext = self._ssql_ctx.newSession()
        # use self.__class__ so subclasses (e.g. HiveContext) return their own type
        return self.__class__(self._sc, jsqlContext)
    @since(1.3)
    def setConf(self, key, value):
        """Sets the given Spark SQL configuration property.

        :param key: configuration property name
        :param value: value to assign to the property
        """
        self._ssql_ctx.setConf(key, value)
    @since(1.3)
    def getConf(self, key, defaultValue):
        """Returns the value of Spark SQL configuration property for the given key.
        If the key is not set, returns defaultValue.

        :param key: configuration property name
        :param defaultValue: value returned when the key is not set
        """
        return self._ssql_ctx.getConf(key, defaultValue)
    @property
    @since("1.3.1")
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.

        Note: a new UDFRegistration wrapper is created on every access.
        :return: :class:`UDFRegistration`
        """
        return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single LongType column named `id`,
containing elements in a range from `start` to `end` (exclusive) with
step value `step`.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._ssql_ctx.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._ssql_ctx.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self)
    @ignore_unicode_prefix
    @since(1.2)
    def registerFunction(self, name, f, returnType=StringType()):
        """Registers a lambda function as a UDF so it can be used in SQL statements.
        In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it defaults to a string and conversion will automatically
        be done. For any other return type, the produced object must match the specified type.
        :param name: name of the UDF
        :param f: python function to register as a UDF
        :param returnType: a :class:`DataType` object
        >>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
        >>> sqlContext.sql("SELECT stringLengthString('test')").collect()
        [Row(_c0=u'4')]
        >>> from pyspark.sql.types import IntegerType
        >>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
        [Row(_c0=4)]
        >>> from pyspark.sql.types import IntegerType
        >>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
        [Row(_c0=4)]
        """
        udf = UserDefinedFunction(f, returnType, name)
        self._ssql_ctx.udf().registerPython(name, udf._judf)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: StructType
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = _infer_schema(first)
if _has_nulltype(schema):
for r in data:
schema = _merge_type(schema, _infer_schema(r))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None):
        """
        Infer schema from an RDD of Row or tuple.
        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :return: StructType
        :raises ValueError: when the first row is empty, or when types remain
                            undetermined after scanning the first 100 rows
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")
        if samplingRatio is None:
            schema = _infer_schema(first)
            if _has_nulltype(schema):
                # skip element 0: its schema was already inferred above
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row))
                    if not _has_nulltype(schema):
                        break
                else:
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # ratios at (or above) 0.99 are treated as "use the whole RDD"
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(_infer_schema).reduce(_merge_type)
        return schema
    @ignore_unicode_prefix
    def inferSchema(self, rdd, samplingRatio=None):
        """
        Infer the schema of ``rdd`` and return the resulting :class:`DataFrame`.

        .. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
        """
        warnings.warn("inferSchema is deprecated, please use createDataFrame instead.")
        if isinstance(rdd, DataFrame):
            raise TypeError("Cannot apply schema to DataFrame")
        return self.createDataFrame(rdd, None, samplingRatio)
    @ignore_unicode_prefix
    def applySchema(self, rdd, schema):
        """
        Apply the given :class:`StructType` schema to ``rdd`` and return the
        resulting :class:`DataFrame`.

        .. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
        """
        warnings.warn("applySchema is deprecated, please use createDataFrame instead")
        if isinstance(rdd, DataFrame):
            raise TypeError("Cannot apply schema to DataFrame")
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType, but got %s" % type(schema))
        return self.createDataFrame(rdd, schema)
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.

        :param rdd: an RDD of Row/tuple/list/dict
        :param schema: None, a list of column names, or a StructType
        :param samplingRatio: row fraction used for inference when the schema
                              must be inferred
        :return: (RDD of internal rows, StructType)
        """
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchema(rdd, samplingRatio)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # a bare list of names renames the inferred columns in place
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif isinstance(schema, StructType):
            # take the first few rows to verify schema
            rows = rdd.take(10)
            for row in rows:
                _verify_type(row, schema)
        else:
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from an list or pandas.DataFrame, returns
        the RDD and schema.

        :param data: a list of Row/tuple/list/dict, or a pandas.DataFrame
        :param schema: None, a list of column names, or a StructType
        :return: (RDD of internal rows, StructType)
        """
        if has_pandas and isinstance(data, pandas.DataFrame):
            # default the column names to the pandas column labels
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]
        # make sure data could consumed multiple times
        if not isinstance(data, list):
            data = list(data)
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchemaFromList(data)
            if isinstance(schema, (list, tuple)):
                # a bare list of names renames the inferred columns in place
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif isinstance(schema, StructType):
            for row in data:
                _verify_type(row, schema)
        else:
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
    @since(1.3)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None):
        """
        Creates a :class:`DataFrame` from an :class:`RDD` of :class:`tuple`/:class:`list`,
        list or :class:`pandas.DataFrame`.
        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.
        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of :class:`Row`/:class:`tuple`/:class:`list`/:class:`dict`,
            :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`StructType` or list of column names. default None.
        :param samplingRatio: the sample ratio of rows used for inferring
        :return: :class:`DataFrame`
        >>> l = [('Alice', 1)]
        >>> sqlContext.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> sqlContext.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> sqlContext.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = sqlContext.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = sqlContext.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> sqlContext.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]]).collect())  # doctest: +SKIP
        [Row(0=1, 1=2)]
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        # dispatch: distributed input vs. local (list / pandas) input
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data, schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(data, schema)
        # hand the python rows to the JVM and apply the schema there
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self)
        # pre-populate the known schema on the python-side DataFrame
        df._schema = schema
        return df
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
if (df.__class__ is DataFrame):
self._ssql_ctx.registerDataFrameAsTable(df._jdf, tableName)
else:
raise ValueError("Can only register DataFrame as table")
def parquetFile(self, *paths):
"""Loads a Parquet file, returning the result as a :class:`DataFrame`.
.. note:: Deprecated in 1.4, use :func:`DataFrameReader.parquet` instead.
>>> sqlContext.parquetFile('python/test_support/sql/parquet_partitioned').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
warnings.warn("parquetFile is deprecated. Use read.parquet() instead.")
gateway = self._sc._gateway
jpaths = gateway.new_array(gateway.jvm.java.lang.String, len(paths))
for i in range(0, len(paths)):
jpaths[i] = paths[i]
jdf = self._ssql_ctx.parquetFile(jpaths)
return DataFrame(jdf, self)
    def jsonFile(self, path, schema=None, samplingRatio=1.0):
        """Loads a text file storing one JSON object per line as a :class:`DataFrame`.
        .. note:: Deprecated in 1.4, use :func:`DataFrameReader.json` instead.
        >>> sqlContext.jsonFile('python/test_support/sql/people.json').dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        warnings.warn("jsonFile is deprecated. Use read.json() instead.")
        if schema is None:
            # no schema given: the JVM samples the data to infer one
            df = self._ssql_ctx.jsonFile(path, samplingRatio)
        else:
            scala_datatype = self._ssql_ctx.parseDataType(schema.json())
            df = self._ssql_ctx.jsonFile(path, scala_datatype)
        return DataFrame(df, self)
    @ignore_unicode_prefix
    @since(1.0)
    def jsonRDD(self, rdd, schema=None, samplingRatio=1.0):
        """Loads an RDD storing one JSON object per string as a :class:`DataFrame`.
        If the schema is provided, applies the given schema to this JSON dataset.
        Otherwise, it samples the dataset with ratio ``samplingRatio`` to determine the schema.
        >>> df1 = sqlContext.jsonRDD(json)
        >>> df1.first()
        Row(field1=1, field2=u'row1', field3=Row(field4=11, field5=None), field6=None)
        >>> df2 = sqlContext.jsonRDD(json, df1.schema)
        >>> df2.first()
        Row(field1=1, field2=u'row1', field3=Row(field4=11, field5=None), field6=None)
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...     StructField("field2", StringType()),
        ...     StructField("field3",
        ...                 StructType([StructField("field5", ArrayType(IntegerType()))]))
        ... ])
        >>> df3 = sqlContext.jsonRDD(json, schema)
        >>> df3.first()
        Row(field2=u'row1', field3=Row(field5=None))
        """
        def func(iterator):
            # normalize each element to utf-8 encoded bytes for the JVM
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = rdd.mapPartitions(func)
        # bytes are handed to the JVM as-is; no python-side serialization
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._jvm.BytesToString())
        if schema is None:
            df = self._ssql_ctx.jsonRDD(jrdd.rdd(), samplingRatio)
        else:
            scala_datatype = self._ssql_ctx.parseDataType(schema.json())
            df = self._ssql_ctx.jsonRDD(jrdd.rdd(), scala_datatype)
        return DataFrame(df, self)
def load(self, path=None, source=None, schema=None, **options):
"""Returns the dataset in a data source as a :class:`DataFrame`.
.. note:: Deprecated in 1.4, use :func:`DataFrameReader.load` instead.
"""
warnings.warn("load is deprecated. Use read.load() instead.")
return self.read.load(path, source, schema, **options)
    @since(1.3)
    def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
        """Creates an external table based on the dataset in a data source.

        It returns the DataFrame associated with the external table.

        The data source is specified by the ``source`` and a set of ``options``.
        If ``source`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.

        Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
        created external table.

        :return: :class:`DataFrame`
        """
        if path is not None:
            # The path travels to the underlying data source as a plain option.
            options["path"] = path
        if source is None:
            # Fall back to the cluster-wide default data source (parquet).
            source = self.getConf("spark.sql.sources.default",
                                  "org.apache.spark.sql.parquet")
        if schema is None:
            df = self._ssql_ctx.createExternalTable(tableName, source, options)
        else:
            if not isinstance(schema, StructType):
                raise TypeError("schema should be StructType")
            # Convert the Python schema into the equivalent JVM DataType.
            scala_datatype = self._ssql_ctx.parseDataType(schema.json())
            df = self._ssql_ctx.createExternalTable(tableName, source, scala_datatype,
                                                    options)
        return DataFrame(df, self)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._ssql_ctx.sql(sqlQuery), self)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._ssql_ctx.table(tableName), self)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("db")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
    @since(1.0)
    def cacheTable(self, tableName):
        """Caches the specified table in-memory."""
        # Pure delegation to the JVM SQLContext.
        self._ssql_ctx.cacheTable(tableName)
    @since(1.0)
    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache."""
        # Pure delegation to the JVM SQLContext.
        self._ssql_ctx.uncacheTable(tableName)
    @since(1.3)
    def clearCache(self):
        """Removes all cached tables from the in-memory cache. """
        # Pure delegation to the JVM SQLContext.
        self._ssql_ctx.clearCache()
    @property
    @since(1.4)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.

        :return: :class:`DataFrameReader`
        """
        # A fresh reader is created on every access (readers are lightweight).
        return DataFrameReader(self)
class HiveContext(SQLContext):
    """A variant of Spark SQL that integrates with data stored in Hive.

    Configuration for Hive is read from ``hive-site.xml`` on the classpath.
    It supports running both SQL and HiveQL commands.

    :param sparkContext: The SparkContext to wrap.
    :param hiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
        :class:`HiveContext` in the JVM, instead we make all calls to this object.
    """

    def __init__(self, sparkContext, hiveContext=None):
        SQLContext.__init__(self, sparkContext)
        if hiveContext:
            # Reuse the caller-supplied JVM HiveContext instead of creating one.
            self._scala_HiveContext = hiveContext

    @property
    def _ssql_ctx(self):
        # Lazily instantiate the JVM HiveContext on first access.
        try:
            if not hasattr(self, '_scala_HiveContext'):
                self._scala_HiveContext = self._get_hive_ctx()
            return self._scala_HiveContext
        except Py4JError as e:
            # A Py4JError here typically means the Hive classes are absent
            # from the JVM, i.e. Spark was built without Hive support.
            raise Exception("You must build Spark with Hive. "
                            "Export 'SPARK_HIVE=true' and run "
                            "build/sbt assembly", e)

    def _get_hive_ctx(self):
        # Construct the JVM-side HiveContext around the Java SparkContext.
        return self._jvm.HiveContext(self._jsc.sc())

    def refreshTable(self, tableName):
        """Invalidate and refresh all the cached metadata of the given
        table. For performance reasons, Spark SQL or the external data source
        library it uses might cache certain metadata about a table, such as the
        location of blocks. When those change outside of Spark SQL, users should
        call this function to invalidate the cache.
        """
        self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
    """Wrapper for user-defined function registration."""

    def __init__(self, sqlContext):
        # Registration simply delegates to the owning SQLContext.
        self.sqlContext = sqlContext

    def register(self, name, f, returnType=StringType()):
        return self.sqlContext.registerFunction(name, f, returnType)
    # Expose the delegated method's documentation on the wrapper.
    register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
    """Run this module's doctests against a local 4-thread SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.context
    # Doctest fixture paths are relative to the Spark source root.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.context.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    globs['df'] = rdd.toDF()
    # JSON fixtures exercised by the jsonFile/jsonRDD doctests above.
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.context, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
_test()
| |
import os
from cffi import FFI
from numpy import *
## Prefer pydb's richer debugger when it is installed; otherwise fall back
## to a tiny wrapper around the standard pdb.
try:
    from pydb import debugger
    ## Also add an exception hook.
    import pydb, sys
    sys.excepthook = pydb.exception_hook
except ImportError:
    import pdb
    def debugger():
        pdb.set_trace()
## Compile the library with:
'''
# OSX
g++ -fPIC \
bbw.cpp mvc.cpp harmonic.cpp \
-std=c++11 \
-I../libigl/include \
-I/usr/local/include/eigen3 \
-I/usr/local/include/eigen3/unsupported \
-dynamiclib -o bbw.dylib \
-g -O3 -Wall -Wshadow -Wno-sign-compare
g++-mp-4.7 -static-libgcc -static-libstdc++ -fPIC \
bbw.cpp mvc.cpp harmonic.cpp \
-std=c++11 \
-I../libigl/include \
-I/usr/local/include/eigen3 \
-I/usr/local/include/eigen3/unsupported \
-dynamiclib -o bbw.dylib \
-DNDEBUG \
/opt/local/lib/gcc47/libgomp.a \
-g -O3 -fopenmp -Wall -Wshadow -Wno-sign-compare
# For some reason this seemed faster in practice, but slower on the bbw.py test.
# Did I compile in between those tests?
clang++-mp-3.3 -fPIC \
bbw.cpp mvc.cpp harmonic.cpp \
-std=c++11 \
-I../libigl/include \
-I/usr/local/include/eigen3 \
-I/usr/local/include/eigen3/unsupported \
-dynamiclib -o bbw.dylib \
-DNDEBUG \
-g -O4 -Wall -Wshadow -Wno-sign-compare
# Linux
g++ -fPIC \
bbw.cpp mvc.cpp harmonic.cpp \
-I../libigl/include \
-Ipath/to/Eigen???? \
-shared -o bbw.so \
-g -O2 -Wall -Wshadow -Wno-sign-compare
# Cygwin?
g++ -fPIC \
bbw.cpp mvc.cpp \
-Ipath/to/igl???? \
-Ipath/to/Eigen???? \
-shared -o bbw.dll \
-g -O2 -Wall -Wshadow -Wno-sign-compare
'''
ffi = FFI()
ffi.cdef("""
typedef double real_t;
typedef int index_t;
// Returns 0 for success, anything else is an error.
int bbw(
/// Input Parameters
// 'vertices' is a pointer to num_vertices*kVertexDimension floating point values,
// packed: x0, y0, z0, x1, y1, z1, ...
// In other words, a num_vertices-by-kVertexDimension matrix packed row-major.
int num_vertices, real_t* vertices,
// 'faces' is a pointer to num_faces*3 integers,
// where each face is three vertex indices: f0.v0, f0.v1, f0.v2, f1.v0, f1.v1, f1.v2, ...
// Face i's vertices are: vertices[ faces[3*i]*2 ], vertices[ faces[3*i+1]*2 ], vertices[ faces[3*i+2]*2 ]
// In other words, a num_faces-by-3 matrix packed row-major.
int num_faces, index_t* faces,
// 'skeleton_vertices' is a pointer to num_skeleton_vertices*kVertexDimension floating point values,
// packed the same way as 'vertices' (NOTE: And whose positions must also exist inside 'vertices'.)
int num_skeleton_vertices, real_t* skeleton_vertices,
// 'skeleton_point_handles' is a pointer to num_skeleton_point_handles integers,
// where each element "i" in skeleton_point_handles references the vertex whose data
// is located at skeleton_vertices[ skeleton_point_handles[i]*kVertexDimension ].
int num_skeleton_point_handles, index_t* skeleton_point_handles,
// TODO: Take skeleton bone edges and cage edges
/// Output Parameters
// 'Wout' is a pointer to num_vertices*num_skeleton_vertices values.
// Upon return, W will be filled with each vertex in 'num_vertices' weight for
// each skeleton vertex in 'num_skeleton_vertices'.
// The data layout is that all 'num_skeleton_vertices' weights for vertex 0
// appear before all 'num_skeleton_vertices' weights for vertex 1, and so on.
// In other words, a num_vertices-by-num_skeleton_vertices matrix packed row-major.
real_t* Wout
);
// Returns 0 for success, anything else is an error.
int mvc(
/// Input Parameters
// 'vertices' is a pointer to num_vertices*2 floating point values,
// packed: x0, y0, x1, y1, ...
// In other words, a num_vertices-by-2 matrix packed row-major.
int num_vertices, real_t* vertices,
// 'line_loop' is a pointer to num_line_loop*2 floating point values,
// packed: x0, y0, x1, y1, ...
// In other words, a num_line_loop-by-2 matrix packed row-major.
int num_line_loop, real_t* line_loop,
/// Output Parameters
// 'Wout' is a pointer to num_vertices*num_line_loop values.
// Upon return, W will be filled with each vertex in 'num_vertices' weight for
// each vertex in 'line_loop'.
// The data layout is that all 'num_line_loop' weights for vertex 0
// appear before all 'num_line_loop' weights for vertex 1, and so on.
// In other words, a num_vertices-by-num_line_loop matrix packed row-major.
real_t* Wout
);
// Returns 0 for success, anything else is an error.
int harmonic(
/// Input Parameters
// 'vertices' is a pointer to num_vertices*kVertexDimension floating point values,
// packed: x0, y0, z0, x1, y1, z1, ...
// In other words, a num_vertices-by-kVertexDimension matrix packed row-major.
int num_vertices, real_t* vertices,
// 'faces' is a pointer to num_faces*3 integers,
// where each face is three vertex indices: f0.v0, f0.v1, f0.v2, f1.v0, f1.v1, f1.v2, ...
// Face i's vertices are: vertices[ faces[3*i]*2 ], vertices[ faces[3*i+1]*2 ], vertices[ faces[3*i+2]*2 ]
// In other words, a num_faces-by-3 matrix packed row-major.
int num_faces, index_t* faces,
// 'boundary_indices' is a pointer to num_boundary_vertices integers,
// where each element "i" in boundary_indices references the vertex whose data
// is located at vertices[ boundary_indices[i]*kVertexDimension ].
int num_boundary_indices, index_t* boundary_indices,
// Power of the harmonic operation (1 is harmonic, 2 is bi-harmonic, etc ),
int power,
/// Output Parameters
// 'Wout' is a pointer to num_vertices*num_boundary_indices values.
// Upon return, W will be filled with each vertex in 'num_vertices' weight for
// each boundary vertex in 'boundary_indices'.
// The data layout is that all 'num_boundary_indices' weights for vertex 0
// appear before all 'num_boundary_indices' weights for vertex 1, and so on.
// In other words, a num_vertices-by-num_boundary_indices matrix packed row-major.
real_t* Wout
);
""")
import ctypes
index_t = ctypes.c_int
real_t = ctypes.c_double
def platform_shared_library_suffix():
    '''Return the shared-library filename suffix for the running platform:
    '.dll' on Windows, '.dylib' on OS X, '.so' everywhere else.'''
    import sys
    platform = sys.platform.lower()
    suffix = '.so'
    if 'win' in platform:
        suffix = '.dll'
    ## No elif: 'darwin' contains 'win', so the darwin test must run last
    ## and override the Windows match.
    if 'darwin' in platform:
        suffix = '.dylib'
    return suffix
libbbw = ffi.dlopen( os.path.join( os.path.dirname( __file__ ), 'bbw' + platform_shared_library_suffix() ) )
class BBWError( Exception ): pass
def bbw( vertices, faces, skeleton_handle_vertices, skeleton_point_handles ):
    '''
    Given an N-by-(2 or 3) numpy array 'vertices' of 2D or 3D vertices,
    an M-by-3 numpy array 'faces' of indices into 'vertices',
    an H-by-(2 or 3) numpy.array 'skeleton_handle_vertices' of 2D or 3D vertices,
    a numpy array 'skeleton_point_handles' of indices into 'skeleton_handle_vertices'
    which are the point handles,
    returns a N-by-H numpy.array of weights per vertex per handle.

    NOTE: All the vertices in 'skeleton_handle_vertices' must also exist in 'vertices'.
    '''
    import numpy
    ## Make sure the input values have their data in a way easy to access from C.
    vertices = numpy.ascontiguousarray( numpy.asarray( vertices, dtype = real_t ) )
    faces = numpy.ascontiguousarray( numpy.asarray( faces, dtype = index_t ) )
    skeleton_handle_vertices = numpy.ascontiguousarray( numpy.asarray( skeleton_handle_vertices, dtype = real_t ) )
    skeleton_point_handles = numpy.ascontiguousarray( numpy.asarray( skeleton_point_handles, dtype = index_t ) )
    ## We allow for 2D or 3D vertices and skeleton_handle_vertices, but
    ## the dimensions must match.
    assert vertices.shape[1] == skeleton_handle_vertices.shape[1]
    assert len( vertices.shape ) == 2
    assert vertices.shape[1] in (2,3)
    ## Turn 2D vertices into 3D vertices by using z = 0.
    if vertices.shape[1] == 2:
        vertices2d = vertices
        vertices = numpy.ascontiguousarray( numpy.zeros( ( len( vertices ), 3 ), dtype = real_t ) )
        vertices[:,:2] = vertices2d
    assert len( faces.shape ) == 2
    assert faces.shape[1] == 3
    assert len( skeleton_handle_vertices.shape ) == 2
    assert skeleton_handle_vertices.shape[1] in (2,3)
    ## Turn 2D vertices into 3D vertices by using z = 0.
    if skeleton_handle_vertices.shape[1] == 2:
        skeleton_handle_vertices2d = skeleton_handle_vertices
        skeleton_handle_vertices = numpy.ascontiguousarray( numpy.zeros( ( len( skeleton_handle_vertices ), 3 ), dtype = real_t ) )
        skeleton_handle_vertices[:,:2] = skeleton_handle_vertices2d
    assert len( skeleton_point_handles.shape ) == 1
    ## Point-handle indices must be unique.
    assert len( skeleton_point_handles ) == len( set( skeleton_point_handles ) )
    ## Output buffer: one weight per (vertex, handle) pair, row-major.
    Wout = numpy.empty( ( len( vertices ), len( skeleton_handle_vertices ) ), dtype = real_t )
    # debugger()
    ## Hand raw pointers into the contiguous numpy buffers to the C library.
    result = libbbw.bbw(
        len( vertices ), ffi.cast( 'real_t*', vertices.ctypes.data ),
        len( faces ), ffi.cast( 'index_t*', faces.ctypes.data ),
        len( skeleton_handle_vertices ), ffi.cast( 'real_t*', skeleton_handle_vertices.ctypes.data ),
        len( skeleton_point_handles ), ffi.cast( 'index_t*', skeleton_point_handles.ctypes.data ),
        ffi.cast( 'real_t*', Wout.ctypes.data )
        )
    if result != 0:
        raise BBWError( 'bbw() reported an error' )
    return Wout
def harmonic( vertices, faces, boundary_indices, power ):
    '''
    Given an N-by-2 or 3 numpy array 'vertices' of 2D or 3D vertices,
    an M-by-3 numpy array 'faces' of indices into 'vertices',
    a length-H sequence 'boundary_indices' of indices into 'vertices' representing the boundary line loop,
    an integer 'power' representing the harmonic power (1 is harmonic, 2 is bi-harmonic, etc),
    returns a N-by-H numpy.array of weights per vertex per handle.
    '''
    import numpy
    ## Make sure the input values have their data in a way easy to access from C.
    vertices = numpy.ascontiguousarray( numpy.asarray( vertices, dtype = real_t ) )
    faces = numpy.ascontiguousarray( numpy.asarray( faces, dtype = index_t ) )
    boundary_indices = numpy.ascontiguousarray( numpy.asarray( boundary_indices, dtype = index_t ) )
    assert len( vertices.shape ) == 2
    assert vertices.shape[1] in (2,3)
    ## Turn 2D vertices into 3D vertices by using z = 0.
    if vertices.shape[1] == 2:
        vertices2d = vertices
        vertices = numpy.ascontiguousarray( numpy.zeros( ( len( vertices ), 3 ), dtype = real_t ) )
        vertices[:,:2] = vertices2d
    assert len( faces.shape ) == 2
    assert faces.shape[1] == 3
    ## Boundary indices must be unique and refer to existing vertices.
    assert len(set( boundary_indices.tolist() )) == len( boundary_indices )
    assert min( boundary_indices ) >= 0
    assert max( boundary_indices ) < len( vertices )
    ## Output buffer: one weight per (vertex, boundary vertex) pair, row-major.
    Wout = numpy.empty( ( len( vertices ), len( boundary_indices ) ), dtype = real_t )
    # debugger()
    ## Hand raw pointers into the contiguous numpy buffers to the C library.
    result = libbbw.harmonic(
        len( vertices ), ffi.cast( 'real_t*', vertices.ctypes.data ),
        len( faces ), ffi.cast( 'index_t*', faces.ctypes.data ),
        len( boundary_indices ), ffi.cast( 'index_t*', boundary_indices.ctypes.data ),
        power,
        ffi.cast( 'real_t*', Wout.ctypes.data )
        )
    if result != 0:
        raise BBWError( 'harmonic() reported an error' )
    return Wout
def mvc( vertices, line_loop ):
    '''
    Given an N-by-2 numpy array 'vertices' of 2D vertices and
    an H-by-2 numpy array 'line_loop' of vertices representing a closed polyline,
    returns a N-by-H numpy.array of weights per vertex per vertex in 'line_loop'.

    Raises BBWError if the native mvc() call fails.
    '''
    import numpy
    ## Make sure the input values have their data in a way easy to access from C.
    vertices = numpy.ascontiguousarray( numpy.asarray( vertices, dtype = real_t ) )
    line_loop = numpy.ascontiguousarray( numpy.asarray( line_loop, dtype = real_t ) )
    ## Check the dimensions. Verify the rank before indexing shape[1], so a
    ## malformed (e.g. 1D) input fails with a clear assertion instead of an
    ## IndexError.
    assert len( vertices.shape ) == 2
    assert vertices.shape[1] == 2
    assert len( line_loop.shape ) == 2
    assert vertices.shape[1] == line_loop.shape[1]
    ## Output buffer: one weight per (vertex, line_loop vertex) pair, row-major.
    Wout = numpy.empty( ( len( vertices ), len( line_loop ) ), dtype = real_t )
    result = libbbw.mvc(
        len( vertices ), ffi.cast( 'real_t*', vertices.ctypes.data ),
        len( line_loop ), ffi.cast( 'real_t*', line_loop.ctypes.data ),
        ffi.cast( 'real_t*', Wout.ctypes.data )
        )
    if result != 0:
        ## Bug fix: the message previously claimed 'bbw() reported an error',
        ## which misattributed the failing native call.
        raise BBWError( 'mvc() reported an error' )
    return Wout
def test_OBJ( path, num_handle_points = None ):
    '''Load an OBJ mesh from 'path', pick 'num_handle_points' evenly spaced
    handle vertices (default 2), run bbw() on the mesh and print the weights.'''
    from numpy import asarray, asfarray, ones
    ## Vertex lines start with 'v'; keep all coordinate columns as floats.
    vs = [ list( map( float, line.strip().split()[1:] ) ) for line in open( path ) if len( line.strip() ) > 0 and line.strip().split()[0] == 'v' ]
    #vs = asfarray( vs )[:,:2]
    #vs3d = ones( ( len( vs ), 3 ) )
    #vs3d[:,:2] = vs
    #vs = vs3d
    ## Face lines start with 'f'; take the vertex index before any '/' and
    ## convert from OBJ's 1-based indexing to 0-based.
    faces = [ [ int( vbundle.split('/')[0] )-1 for vbundle in line.strip().split()[1:] ] for line in open( path ) if len( line.strip() ) > 0 and line.strip().split()[0] == 'f' ]
    faces = asarray( faces, dtype = int )
    print 'Loaded', len( vs ), 'vertices and ', len( faces ), 'faces from:', path
    ## Choose 'num_handle_points' points evenly chosen from the list of points, skipping
    ## the first and last in case something's up with them.
    if num_handle_points is None: num_handle_points = 2
    handle_points = [ ((h+1)*len(vs))//(num_handle_points+1) for h in range( num_handle_points ) ]
    assert len( set( handle_points ) ) == len( handle_points )
    assert min( handle_points ) >= 0
    assert max( handle_points ) < len( vs )
    # debugger()
    ## To test a single handle, uncomment the following line.
    #handle_points = handle_points[:1]
    import time
    duration = time.time()
    W = bbw( vs, faces, [ vs[i] for i in handle_points ], list(range(len( handle_points ))) )
    duration = time.time() - duration
    print 'bbw() took', duration, 'seconds'
    print W
def test_simple_bbw():
    '''Smoke-test bbw(): a unit square triangulated around its center,
    with a single point handle on vertex 0.'''
    print 'test_simple_bbw()'
    vs = array([(-1, -1), (1, -1), (1, 1), (-1, 1), (0, 0)])
    faces = array([[3, 0, 4], [4, 1, 2], [1, 4, 0], [4, 2, 3]])
    handle_points = [ 0 ]
    W = bbw( vs, faces, [ vs[i] for i in handle_points ], list(range(len( handle_points ))) )
    print W
def test_simple_harmonic():
    '''Smoke-test harmonic(): weights for the four corners of a square
    with one interior vertex, harmonic power 1.'''
    print 'test_simple_harmonic()'
    ## A square and a point inside.
    vs = array([(-1, -1), (1, -1), (1, 1), (-1, 1), (0, 0)])
    faces = array([[3, 0, 4], [4, 1, 2], [1, 4, 0], [4, 2, 3]])
    boundary_indices = [ 0, 1, 2, 3 ]
    W = harmonic( vs, faces, boundary_indices, 1 )
    print W
def test_simple_mvc():
    '''Smoke-test mvc(): mean-value coordinates of five points against
    a square cage.'''
    print 'test_simple_mvc()'
    vs = array([(-1, -1), (1, -1), (1, 1), (-1, 1), (0, 0)])
    cage_loop = [(-1, -1), (1, -1), (1, 1), (-1, 1)]
    W = mvc( vs, cage_loop )
    print W
def main():
    '''Command-line entry point: with an OBJ path argument run test_OBJ
    (optional second argument is the handle count); otherwise run the
    built-in simple test.'''
    import sys
    if len( sys.argv ) > 1:
        test_OBJ( sys.argv[1], int( sys.argv[2] ) if len( sys.argv ) > 2 else None )
    else:
        #test_simple_bbw()
        #test_simple_mvc()
        test_simple_harmonic()
if __name__ == '__main__': main()
| |
from __future__ import absolute_import, unicode_literals
import sys
from vine import Thenable, promise, maybe_promise
from kombu.exceptions import HttpError
from kombu.five import items, python_2_unicode_compatible
from kombu.utils import coro
from kombu.utils.encoding import bytes_to_str
from kombu.utils.functional import maybe_list, memoize
try: # pragma: no cover
from http.client import responses
except ImportError:
from httplib import responses # noqa
__all__ = ['Headers', 'Response', 'Request']
PYPY = hasattr(sys, 'pypy_version_info')
@memoize(maxsize=1000)
def normalize_header(key):
    """Return the header name with each '-'-separated part capitalized,
    e.g. 'content-type' -> 'Content-Type'. Memoized (up to 1000 keys)."""
    parts = key.split('-')
    return '-'.join(part.capitalize() for part in parts)
class Headers(dict):
    """Dict subclass used to hold HTTP response headers.

    TODO: This is just a regular dict and will not perform normalization
    when looking up keys etc.
    """

    #: Set when all of the headers have been read.
    complete = False

    #: Internal attribute used to keep track of continuation lines.
    _prev_key = None
@Thenable.register
@python_2_unicode_compatible
class Request(object):
    """A HTTP Request.

    :param url: The URL to request.
    :param method: The HTTP method to use (defaults to ``GET``).

    :keyword headers: Optional headers for this request
        (:class:`dict` or :class:`~kombu.async.http.Headers`).
    :keyword body: Optional body for this request.
    :keyword connect_timeout: Connection timeout in float seconds
        (default 30.0).
    :keyword timeout: Time in float seconds before the request times out
        (default 30.0).
    :keyword follow_redirects: Specify if the client should follow redirects
        (enabled by default).
    :keyword max_redirects: Maximum number of redirects (default 6).
    :keyword use_gzip: Allow the server to use gzip compression (enabled by
        default).
    :keyword validate_cert: Set to true if the server certificate should be
        verified when performing ``https://`` requests (enabled by default).
    :keyword auth_username: Username for HTTP authentication.
    :keyword auth_password: Password for HTTP authentication.
    :keyword auth_mode: Type of HTTP authentication (``basic`` or ``digest``).
    :keyword user_agent: Custom user agent for this request.
    :keyword network_interface: Network interface to use for this request.
    :keyword on_ready: Callback to be called when the response has been
        received. Must accept single ``response`` argument.
    :keyword on_stream: Optional callback to be called every time body content
        has been read from the socket. If specified then the response body
        and buffer attributes will not be available.
    :keyword on_timeout: Optional callback to be called if the request
        times out.
    :keyword on_header: Optional callback to be called for every header line
        received from the server. The signature is ``(headers, line)``
        and note that if you want ``response.headers`` to be populated
        then your callback needs to also call
        ``client.on_header(headers, line)``.
    :keyword on_prepare: Optional callback that is implementation specific
        (e.g. curl client will pass the ``curl`` instance to this callback).
    :keyword proxy_host: Optional proxy host. Note that a ``proxy_port`` must
        also be provided or a :exc:`ValueError` will be raised.
    :keyword proxy_username: Optional username to use when logging in
        to the proxy.
    :keyword proxy_password: Optional password to use when authenticating
        with the proxy server.
    :keyword ca_certs: Custom CA certificates file to use.
    :keyword client_key: Optional filename for client SSL key.
    :keyword client_cert: Optional filename for client SSL certificate.
    """

    # Optional attributes default to None at class level; instances only
    # pay the __dict__ cost for values actually passed as kwargs.
    body = user_agent = network_interface = \
        auth_username = auth_password = auth_mode = \
        proxy_host = proxy_port = proxy_username = proxy_password = \
        ca_certs = client_key = client_cert = None

    connect_timeout = 30.0
    request_timeout = 30.0
    follow_redirects = True
    max_redirects = 6
    use_gzip = True
    validate_cert = True

    # NOTE(review): slots are skipped on PyPy — presumably they do not pay
    # off there; '__dict__' is still listed so extra kwargs can be set.
    if not PYPY:  # pragma: no cover
        __slots__ = ('url', 'method', 'on_ready', 'on_timeout', 'on_stream',
                     'on_prepare', 'on_header', 'headers',
                     '__weakref__', '__dict__')

    def __init__(self, url, method='GET', on_ready=None, on_timeout=None,
                 on_stream=None, on_prepare=None, on_header=None,
                 headers=None, **kwargs):
        self.url = url
        self.method = method or self.method
        # Coerce all callbacks to promises; on_ready always exists so that
        # then() can chain onto it unconditionally.
        self.on_ready = maybe_promise(on_ready) or promise()
        self.on_timeout = maybe_promise(on_timeout)
        self.on_stream = maybe_promise(on_stream)
        self.on_prepare = maybe_promise(on_prepare)
        self.on_header = maybe_promise(on_header)
        if kwargs:
            # Any remaining keyword arguments override the class-level
            # defaults declared above.
            for k, v in items(kwargs):
                setattr(self, k, v)
        if not isinstance(headers, Headers):
            headers = Headers(headers or {})
        self.headers = headers

    def then(self, callback, errback=None):
        # Thenable protocol: chain onto the on_ready promise.
        self.on_ready.then(callback, errback)

    def __repr__(self):
        return '<Request: {0.method} {0.url} {0.body}>'.format(self)
class Response(object):
    """HTTP Response.

    :param request: See :attr:`request`.
    :keyword code: See :attr:`code`.
    :keyword headers: See :attr:`headers`.
    :keyword buffer: See :attr:`buffer`
    :keyword effective_url: See :attr:`effective_url`.
    :keyword status: See :attr:`status`.

    .. attribute:: request

        :class:`Request` object used to get this response.

    .. attribute:: code

        HTTP response code (e.g. 200, 404, or 500).

    .. attribute:: headers

        HTTP headers for this response (:class:`Headers`).

    .. attribute:: buffer

        Socket read buffer.

    .. attribute:: effective_url

        The destination url for this request after following redirects.

    .. attribute:: error

        Error instance if the request resulted in a HTTP error code.

    .. attribute:: status

        Human equivalent of :attr:`code`, e.g. ``OK``, `Not found`, or
        'Internal Server Error'.
    """

    if not PYPY:  # pragma: no cover
        __slots__ = ('request', 'code', 'headers', 'buffer', 'effective_url',
                     'error', 'status', '_body', '__weakref__')

    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, status=None):
        self.request = request
        self.code = code
        self.headers = headers if headers is not None else Headers()
        self.buffer = buffer
        self.effective_url = effective_url or request.url
        self._body = None

        # Map the numeric code to its reason phrase when no status is given.
        self.status = status or responses.get(self.code, 'Unknown')
        self.error = error
        # Any non-2xx code without an explicit error becomes an HttpError.
        if self.error is None and (self.code < 200 or self.code > 299):
            self.error = HttpError(self.code, self.status, self)

    def raise_for_error(self):
        """Raise :class:`~kombu.exceptions.HttpError` if the request resulted
        in a HTTP error code."""
        if self.error:
            raise self.error

    @property
    def body(self):
        """The full contents of the response body.

        Note that accessing this property will evaluate the buffer
        and subsequent accesses will be cached.
        """
        if self._body is None:
            if self.buffer is not None:
                self._body = self.buffer.getvalue()
        return self._body
@coro
def header_parser(keyt=normalize_header):
    """Coroutine that parses HTTP header lines one at a time.

    Send ``(line, headers)`` tuples into it; it mutates ``headers`` in
    place: 'HTTP/...' status lines are skipped, a blank line marks the
    headers as complete, lines beginning with whitespace are folded into
    the value of the previously seen header (continuation lines), and
    ordinary 'Key: value' lines are stored under the key normalized by
    ``keyt``.
    """
    while 1:
        (line, headers) = yield
        if line.startswith('HTTP/'):
            # Status line, not a header.
            continue
        elif not line:
            # Blank line terminates the header block.
            headers.complete = True
            continue
        elif line[0].isspace():
            # Continuation line: append to the value of the previous header.
            pkey = headers._prev_key
            headers[pkey] = ' '.join([headers.get(pkey) or '', line.lstrip()])
        else:
            key, value = line.split(':', 1)
            key = headers._prev_key = keyt(key)
            headers[key] = value.strip()
class BaseClient(object):
    """Base class for async HTTP clients.

    Concrete transports must implement :meth:`add_request`.
    """

    Headers = Headers
    Request = Request
    Response = Response

    def __init__(self, hub, **kwargs):
        self.hub = hub
        # A single stateful coroutine accumulates header lines across reads.
        self._header_parser = header_parser()

    def perform(self, request, **kwargs):
        # Accept a single request or a list; coerce non-Request values
        # (e.g. bare URLs) into Request objects using the extra kwargs.
        for req in maybe_list(request):
            if not isinstance(req, self.Request):
                req = self.Request(req, **kwargs)
            self.add_request(req)

    def add_request(self, request):
        raise NotImplementedError('must implement add_request')

    def close(self):
        pass

    def on_header(self, headers, line):
        try:
            self._header_parser.send((bytes_to_str(line), headers))
        except StopIteration:
            # The parser coroutine exited; start a fresh one for the
            # next response's headers.
            self._header_parser = header_parser()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()
| |
import re
import struct
import time
import socket, select
import Queue, threading
from collections import namedtuple
import commands
class ISCPMessage(object):
    """Deals with formatting and parsing data wrapped in an ISCP
    containers. The docs say:

    ISCP (Integra Serial Control Protocol) consists of three
    command characters and parameter character(s) of variable
    length.

    It seems this was the original protocol used for communicating
    via a serial cable.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        # '!'  = start character
        # '1'  = destination unit type (1 means receiver)
        # '\r' = end character (doc allows CR, LF or CR+LF)
        return '!1%s\r' % self.data

    @classmethod
    def parse(cls, data):
        """Strip the '!1' prefix, trailing terminator(s) and the EOF
        marker from ``data`` and return the payload."""
        EOF = '\x1a'
        assert data[:2] == '!1'
        end = len(data)
        # The EOF byte may be followed by CR, LF or CR+LF, so peel off
        # at most two terminator characters from the end.
        for _ in range(2):
            if data[end - 1] in ('\n', '\r'):
                end -= 1
        assert data[end - 1] == EOF
        return data[2:end - 1]
class eISCPPacket(object):
    """For communicating over Ethernet, traditional ISCP messages are
    wrapped inside an eISCP package.
    """

    # Parsed representation of the fixed 16-byte packet header.
    header = namedtuple('header', (
        'magic, header_size, data_size, version, reserved'))

    def __init__(self, iscp_message):
        iscp_message = str(iscp_message)
        # We attach data separately, because Python's struct module does
        # not support variable length strings,
        header = struct.pack(
            '! 4s I I b 3b',
            'ISCP', # magic
            16, # header size (16 bytes)
            len(iscp_message), # data size
            0x01, # version
            0x00, 0x00, 0x00 # reserved
        )
        self._bytes = "%s%s" % (header, iscp_message)
        # __new__, string subclass?

    def __str__(self):
        return self._bytes

    @classmethod
    def parse(cls, bytes):
        """Parse the eISCP package given by ``bytes``.

        Validates the header and returns only the payload.
        """
        h = cls.parse_header(bytes[:16])
        data = bytes[h.header_size:h.header_size + h.data_size]
        assert len(data) == h.data_size
        return data

    @classmethod
    def parse_header(self, bytes):
        """Parse the header of an eISCP package.

        This is useful when reading data in a streaming fashion,
        because you can subsequently know the number of bytes to
        expect in the packet.
        """
        # A header is always 16 bytes in length
        assert len(bytes) == 16

        # Parse the header
        magic, header_size, data_size, version, reserved = \
            struct.unpack('! 4s I I b 3s', bytes)

        # Strangely, the header contains a header_size field.
        assert magic == 'ISCP'
        assert header_size == 16

        return eISCPPacket.header(
            magic, header_size, data_size, version, reserved)
def command_to_packet(command):
    """Convert an ascii command like (PVR00) to the binary data we
    need to send to the receiver.
    """
    # Wrap the raw command in an ISCP message, then in an eISCP packet.
    message = ISCPMessage(command)
    return str(eISCPPacket(message))
def normalize_command(command):
    """Ensures that various ways to refer to a command can be used."""
    # Lowercase, then treat underscores and dashes as word separators.
    return command.lower().replace('_', ' ').replace('-', ' ')
def command_to_iscp(command, arguments=None, zone=None):
    """Transform the given high-level command to a
    low-level ISCP message.

    Raises :class:`ValueError` if `command` is not valid.

    This exposes a system of human-readable, "pretty"
    commands, which is organized into three parts: the zone, the
    command, and arguments. For example::

        command('power', 'on')
        command('power', 'on', zone='main')
        command('volume', 66, zone='zone2')

    As you can see, if no zone is given, the main zone is assumed.

    Instead of passing three different parameters, you may put the
    whole thing in a single string, which is helpful when taking
    input from users::

        command('power on')
        command('zone2 volume 66')

    To further simplify things, for example when taking user input
    from a command line, where whitespace needs escaping, the
    following is also supported:

        command('power=on')
        command('zone2.volume=66')
    """
    default_zone = 'main'
    command_sep = r'[. ]'
    norm = lambda s: s.strip().lower()

    # If parts are not explicitly given, parse the command
    if arguments is None and zone is None:
        # Separating command and args with colon allows multiple args
        if ':' in command or '=' in command:
            base, arguments = re.split(r'[:=]', command, 1)
            parts = [norm(c) for c in re.split(command_sep, base)]
            if len(parts) == 2:
                zone, command = parts
            else:
                zone = default_zone
                command = parts[0]
            # Split arguments by comma or space
            arguments = [norm(a) for a in re.split(r'[ ,]', arguments)]
        else:
            # Split command part by space or dot
            parts = [norm(c) for c in re.split(command_sep, command)]
            if len(parts) >= 3:
                zone, command = parts[:2]
                # BUGFIX: arguments begin right after the command, i.e. at
                # parts[2:]. The previous parts[3:] silently dropped the
                # first argument and raised IndexError for exactly three
                # parts (e.g. ``command('zone2 volume 66')``).
                arguments = parts[2:]
            elif len(parts) == 2:
                zone = default_zone
                command = parts[0]
                arguments = parts[1:]
            else:
                raise ValueError('Need at least command and argument')

    # Find the command in our database, resolve to internal eISCP command
    group = commands.ZONE_MAPPINGS.get(zone, zone)
    if zone not in commands.COMMANDS:
        raise ValueError('"%s" is not a valid zone' % zone)

    prefix = commands.COMMAND_MAPPINGS[group].get(command, command)
    if prefix not in commands.COMMANDS[group]:
        raise ValueError('"%s" is not a valid command in zone "%s"'
                         % (command, zone))

    # Resolve the argument to the command. This is a bit more involved,
    # because some commands support ranges (volume) or patterns
    # (setting tuning frequency). In some cases, we might imagine
    # providing the user an API with multiple arguments (TODO: not
    # currently supported).
    argument = arguments[0]

    # 1. Consider if there is a alias, e.g. level-up for UP.
    try:
        value = commands.VALUE_MAPPINGS[group][prefix][argument]
    except KeyError:
        # 2. See if we can match a range or pattern
        for possible_arg in commands.VALUE_MAPPINGS[group][prefix]:
            if argument.isdigit():
                if isinstance(possible_arg, xrange):
                    if int(argument) in possible_arg:
                        # We need to send the format "FF", hex() gives us 0xff
                        value = hex(int(argument))[2:].zfill(2).upper()
                        break
            # TODO: patterns not yet supported
        else:
            # for..else: no range matched the argument either.
            raise ValueError('"%s" is not a valid argument for command '
                             '"%s" in zone "%s"' % (argument, command, zone))

    return '%s%s' % (prefix, value)
def iscp_to_command(iscp_message):
    """Map a raw ISCP message (e.g. ``MVL13``) back to the human-readable
    (command-name, value) pair from the command database."""
    for zone, zone_cmds in commands.COMMANDS.iteritems():
        # For now, ISCP commands are always three characters, which
        # makes this easy.
        command, args = iscp_message[:3], iscp_message[3:]
        if command in zone_cmds:
            if args in zone_cmds[command]['values']:
                # Known named value (e.g. "01" -> "on").
                return zone_cmds[command]['name'], \
                       zone_cmds[command]['values'][args]['name']
            else:
                # Unnamed values that look like hex numbers (e.g. volume
                # levels) are decoded to an int; everything else is passed
                # through verbatim.
                match = re.match('[+-]?[0-9a-f]+$', args, re.IGNORECASE)
                if match:
                    return zone_cmds[command]['name'], \
                           int(args, 16)
                else:
                    return zone_cmds[command]['name'], args
    else:
        # for..else: no zone knows this command.
        raise ValueError(
            'Cannot convert ISCP message to command: %s' % iscp_message)
def filter_for_message(getter_func, msg):
    """Helper that calls ``getter_func`` until a matching message
    is found, or the timeout occurs. Matching means the same commands
    group, i.e. for sent message MVLUP we would accept MVL13
    in response."""
    # It seems ISCP commands are always three characters.
    wanted_group = msg[:3]
    # The protocol docs claim that a response should arrive
    # within *50ms or the communication has failed*. In my tests,
    # however, the interval needed to be at least 200ms before
    # I managed to see any response, and only after 300ms
    # reproducably, so use a generous timeout.
    deadline = time.time() + 5.0

    candidate = getter_func(0.05)
    while not (candidate and candidate[:3] == wanted_group):
        if time.time() > deadline:
            raise ValueError('Timeout waiting for response.')
        candidate = getter_func(0.05)
    return candidate
def parse_info(data):
    """Extract the discovery info dict (device category, model name,
    ISCP port, area code, identifier) from an eISCP response packet."""
    payload = eISCPPacket.parse(data)
    # Return string looks something like this:
    # !1ECNTX-NR609/60128/DX
    pattern = re.compile(r'''
        !
        (?P<device_category>\d)
        ECN
        (?P<model_name>[^/]*)/
        (?P<iscp_port>\d{5})/
        (?P<area_code>\w{2})/
        (?P<identifier>.{0,12})
        ''', re.VERBOSE)
    return pattern.match(payload.strip()).groupdict()
class eISCP(object):
    """Implements the eISCP interface to Onkyo receivers.

    This uses a blocking interface. The remote end will regularily
    send unsolicited status updates. You need to manually call
    ``get_message`` to query those.

    You may want to look at the :meth:`Receiver` class instead, which
    uses a background thread.
    """

    @classmethod
    def discover(cls, timeout=5, clazz=None):
        """Try to find ISCP devices on network.

        Waits for ``timeout`` seconds, then returns all devices found,
        in form of a list of dicts.
        """
        onkyo_port = 60128
        onkyo_magic = str(eISCPPacket('!xECNQSTN'))

        # Broadcast magic
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setblocking(0)  # So we can use select()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.bind(('0.0.0.0', 0))
        sock.sendto(onkyo_magic, ('255.255.255.255', onkyo_port))

        found_receivers = []
        while True:
            ready = select.select([sock], [], [], timeout)
            if not ready[0]:
                break
            data, addr = sock.recvfrom(1024)
            info = parse_info(data)
            # Give the user a ready-made receiver instance. It will only
            # connect on demand, when actually used.
            receiver = (clazz or eISCP)(addr[0], int(info['iscp_port']))
            receiver.info = info
            found_receivers.append(receiver)
        sock.close()
        return found_receivers

    def __init__(self, host, port=60128):
        self.host = host
        self.port = port
        self._info = None
        # TCP command connection; created lazily on first use.
        self.command_socket = None

    def __repr__(self):
        if self.info and self.info.get('model_name'):
            model = self.info['model_name']
        else:
            model = 'unknown'
        string = "<%s(%s) %s:%s>" % (
            self.__class__.__name__, model, self.host, self.port)
        return string

    @property
    def info(self):
        """Device info dict, resolved lazily via a UDP query to the
        receiver and cached afterwards."""
        if not self._info:
            sock = socket.socket(
                socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            try:
                sock.setblocking(0)
                sock.bind(('0.0.0.0', 0))
                sock.sendto(str(eISCPPacket('!xECNQSTN')), (self.host, self.port))
                ready = select.select([sock], [], [], 0.1)
                if ready[0]:
                    data = sock.recv(1024)
                    self._info = parse_info(data)
            finally:
                # BUGFIX: release the socket even if the query or the
                # response parsing above raises.
                sock.close()
        return self._info

    @info.setter
    def info(self, value):
        self._info = value

    def _ensure_socket_connected(self):
        # Connect on demand; non-blocking so get() can use select().
        if self.command_socket is None:
            self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.command_socket.connect((self.host, self.port))
            self.command_socket.setblocking(0)

    def disconnect(self):
        """Close the command connection; safe to call when not connected."""
        # BUGFIX: the former bare ``except: pass`` also hid the
        # AttributeError raised when never connected; be explicit instead.
        if self.command_socket is not None:
            try:
                self.command_socket.close()
            except socket.error:
                # Best-effort close; the peer may already be gone.
                pass
            self.command_socket = None

    def __enter__(self):
        self._ensure_socket_connected()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.disconnect()

    def send(self, iscp_message):
        """Send a low-level ISCP message, like ``MVL50``.

        This does not return anything, nor does it wait for a response
        from the receiver. You can query responses via :meth:`get`,
        or use :meth:`raw` to send a message and waiting for one.
        """
        self._ensure_socket_connected()
        self.command_socket.send(command_to_packet(iscp_message))

    def get(self, timeout=0.1):
        """Return the next message sent by the receiver, or, after
        ``timeout`` has passed, return ``None``.
        """
        self._ensure_socket_connected()

        ready = select.select([self.command_socket], [], [], timeout or 0)
        if ready[0]:
            header_bytes = self.command_socket.recv(16)
            header = eISCPPacket.parse_header(header_bytes)
            message = self.command_socket.recv(header.data_size)
            return ISCPMessage.parse(message)

    def raw(self, iscp_message):
        """Send a low-level ISCP message, like ``MVL50``, and wait
        for a response.

        While the protocol is designed to acknowledge each message with
        a response, there is no fool-proof way to differentiate those
        from unsolicited status updates, though we'll do our best to
        try. Generally, this won't be an issue, though in theory the
        response this function returns to you sending ``SLI05`` may be
        an ``SLI06`` update from another controller.

        It'd be preferable to design your app in a way where you are
        processing all incoming messages the same way, regardless of
        their origin.
        """
        while self.get(False):
            # Clear all incoming messages. If not yet queried,
            # they are lost. This is so that we can find the real
            # response to our sent command later.
            pass
        self.send(iscp_message)
        return filter_for_message(self.get, iscp_message)

    def command(self, command, arguments=None, zone=None):
        """Send a high-level command to the receiver, return the
        receiver's response formatted has a command.

        This is basically a helper that combines :meth:`raw`,
        :func:`command_to_iscp` and :func:`iscp_to_command`.
        """
        iscp_message = command_to_iscp(command, arguments, zone)
        response = self.raw(iscp_message)
        if response:
            return iscp_to_command(response)

    def power_on(self):
        """Turn the receiver power on."""
        return self.command('power', 'on')

    def power_off(self):
        """Turn the receiver power off."""
        return self.command('power', 'off')
class Receiver(eISCP):
    """Changes the behaviour of :class:`eISCP` to use a background
    thread for network operations. This allows receiving messages
    from the receiver via a callback::

        def message_received(message):
            print message

        receiver = Receiver('...')
        receiver.on_message = message_received

    The argument ``message`` is
    """

    @classmethod
    def discover(cls, timeout=5, clazz=None):
        # Same as eISCP.discover, but hands out Receiver instances by default.
        return eISCP.discover(timeout, clazz or Receiver)

    def _ensure_thread_running(self):
        # Lazily start the worker thread on first use.
        if not getattr(self, '_thread', False):
            self._stop = False
            self._queue = Queue.Queue()
            self._thread = threading.Thread(target=self._thread_loop)
            self._thread.start()

    def disconnect(self):
        # Signal the worker loop to exit and wait for it to finish;
        # the loop itself closes the socket in its finally clause.
        self._stop = True
        self._thread.join()
        self._thread = None

    def send(self, iscp_message):
        """Like :meth:`eISCP.send`, but sends asynchronously via the
        background thread.
        """
        self._ensure_thread_running()
        self._queue.put((iscp_message, None, None))

    def get(self, *a, **kw):
        """Not supported by this class. Use the :attr:`on_message``
        hook to handle incoming messages.
        """
        raise NotImplementedError()

    def raw(self, iscp_message):
        """Like :meth:`eISCP.raw`.
        """
        self._ensure_thread_running()
        # Hand the message to the worker thread together with an event to
        # wait on and a one-slot list for the response (or the exception).
        event = threading.Event()
        result = []
        self._queue.put((iscp_message, event, result))
        event.wait()
        if isinstance(result[0], Exception):
            raise result[0]
        return result[0]

    def _thread_loop(self):
        # Worker loop: pump unsolicited messages to ``on_message`` and
        # send queued outgoing messages, optionally awaiting a response.
        def trigger(message):
            if self.on_message:
                self.on_message(message)

        eISCP._ensure_socket_connected(self)
        try:
            while not self._stop:
                # Clear all incoming message first.
                while True:
                    msg = eISCP.get(self, False)
                    if not msg:
                        break
                    trigger(msg)

                # Send next message
                try:
                    item = self._queue.get(timeout=0.01)
                except Queue.Empty:
                    continue
                if item:
                    message, event, result = item
                    eISCP.send(self, message)

                    # Wait for a response, if the caller so desires
                    if event:
                        try:
                            # XXX We are losing messages here, since
                            # those are not triggering the callback!
                            # eISCP.raw() really has the same problem,
                            # messages being dropped without a chance
                            # to get() them. Maybe use a queue after all.
                            response = filter_for_message(
                                super(Receiver, self).get, message)
                        except ValueError, e:
                            # No response received within timeout
                            result.append(e)
                        else:
                            result.append(response)
                        # Mark as processed
                        event.set()
        finally:
            eISCP.disconnect(self)
| |
__author__ = 'frank'
import os.path
import traceback
import zstacklib.utils.uuidhelper as uuidhelper
from kvmagent import kvmagent
from kvmagent.plugins.imagestore import ImageStoreClient
from zstacklib.utils import jsonobject
from zstacklib.utils import shell
from zstacklib.utils.bash import *
from zstacklib.utils.report import *
logger = log.get_logger(__name__)
class AgentResponse(object):
    """Base response for all local-storage agent commands."""
    def __init__(self):
        # Capacity snapshot of the storage the command operated on.
        self.totalCapacity = None
        self.availableCapacity = None
        # Generic outcome fields consumed by the management server.
        self.success = None
        self.error = None
class RevertVolumeFromSnapshotRsp(AgentResponse):
    """Response of REVERT_SNAPSHOT_PATH: path and size of the new volume."""
    def __init__(self):
        super(RevertVolumeFromSnapshotRsp, self).__init__()
        self.newVolumeInstallPath = None
        self.size = None
class MergeSnapshotRsp(AgentResponse):
    """Response of MERGE_SNAPSHOT_PATH: virtual and on-disk size."""
    def __init__(self):
        super(MergeSnapshotRsp, self).__init__()
        self.size = None
        self.actualSize = None
class RebaseAndMergeSnapshotsRsp(AgentResponse):
    """Response of MERGE_AND_REBASE_SNAPSHOT_PATH."""
    def __init__(self):
        super(RebaseAndMergeSnapshotsRsp, self).__init__()
        self.size = None
        self.actualSize = None
class CheckBitsRsp(AgentResponse):
    """Response of CHECK_BITS_PATH: whether the path exists."""
    def __init__(self):
        super(CheckBitsRsp, self).__init__()
        self.existing = False
class GetMd5Rsp(AgentResponse):
    """Response of GET_MD5_PATH: list of per-file md5 records."""
    def __init__(self):
        super(GetMd5Rsp, self).__init__()
        self.md5s = None
class GetBackingFileRsp(AgentResponse):
    """Response of GET_BACKING_FILE_PATH: backing file path and size."""
    def __init__(self):
        super(GetBackingFileRsp, self).__init__()
        self.size = None
        self.backingFilePath = None
class GetVolumeSizeRsp(AgentResponse):
    """Response of GET_VOLUME_SIZE: virtual and actual size of a volume."""
    def __init__(self):
        super(GetVolumeSizeRsp, self).__init__()
        self.actualSize = None
        self.size = None
class GetVolumeBaseImagePathRsp(AgentResponse):
    """Response of GET_BASE_IMAGE_PATH: base image of a backing chain."""
    def __init__(self):
        super(GetVolumeBaseImagePathRsp, self).__init__()
        self.path = None
class GetQCOW2ReferenceRsp(AgentResponse):
    """Response of GET_QCOW2_REFERENCE: files referencing a backing file."""
    def __init__(self):
        super(GetQCOW2ReferenceRsp, self).__init__()
        self.referencePaths = None
class ResizeVolumeRsp(AgentResponse):
    """Response of RESIZE_VOLUME_PATH: new virtual size."""
    def __init__(self):
        super(ResizeVolumeRsp, self).__init__()
        self.size = None
class ListResponse(AgentResponse):
    """Response of GET_LIST_PATH: directory entries."""
    def __init__(self):
        super(ListResponse, self).__init__()
        self.paths = []
class LocalStoragePlugin(kvmagent.KvmAgent):
    """KVM agent plugin implementing local-storage primary storage.

    Each constant below is the HTTP route of one agent command; the
    handlers are wired up in ``start``.
    """
    INIT_PATH = "/localstorage/init"
    GET_PHYSICAL_CAPACITY_PATH = "/localstorage/getphysicalcapacity"
    CREATE_EMPTY_VOLUME_PATH = "/localstorage/volume/createempty"
    CREATE_VOLUME_FROM_CACHE_PATH = "/localstorage/volume/createvolumefromcache"
    DELETE_BITS_PATH = "/localstorage/delete"
    DELETE_DIR_PATH = "/localstorage/deletedir"
    GET_LIST_PATH = "/localstorage/list"
    UPLOAD_BIT_PATH = "/localstorage/sftp/upload"
    DOWNLOAD_BIT_PATH = "/localstorage/sftp/download"
    UPLOAD_TO_IMAGESTORE_PATH = "/localstorage/imagestore/upload"
    COMMIT_TO_IMAGESTORE_PATH = "/localstorage/imagestore/commit"
    DOWNLOAD_FROM_IMAGESTORE_PATH = "/localstorage/imagestore/download"
    REVERT_SNAPSHOT_PATH = "/localstorage/snapshot/revert"
    MERGE_SNAPSHOT_PATH = "/localstorage/snapshot/merge"
    MERGE_AND_REBASE_SNAPSHOT_PATH = "/localstorage/snapshot/mergeandrebase"
    OFFLINE_MERGE_PATH = "/localstorage/snapshot/offlinemerge"
    CREATE_TEMPLATE_FROM_VOLUME = "/localstorage/volume/createtemplate"
    CHECK_BITS_PATH = "/localstorage/checkbits"
    REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH = "/localstorage/volume/rebaserootvolumetobackingfile"
    VERIFY_SNAPSHOT_CHAIN_PATH = "/localstorage/snapshot/verifychain"
    REBASE_SNAPSHOT_BACKING_FILES_PATH = "/localstorage/snapshot/rebasebackingfiles"
    COPY_TO_REMOTE_BITS_PATH = "/localstorage/copytoremote"
    GET_MD5_PATH = "/localstorage/getmd5"
    CHECK_MD5_PATH = "/localstorage/checkmd5"
    GET_BACKING_FILE_PATH = "/localstorage/volume/getbackingfile"
    GET_VOLUME_SIZE = "/localstorage/volume/getsize"
    GET_BASE_IMAGE_PATH = "/localstorage/volume/getbaseimagepath"
    GET_QCOW2_REFERENCE = "/localstorage/getqcow2reference"
    CONVERT_QCOW2_TO_RAW = "/localstorage/imagestore/convert/raw"
    RESIZE_VOLUME_PATH = "/localstorage/volume/resize"
    LOCAL_NOT_ROOT_USER_MIGRATE_TMP_PATH = "primary_storage_tmp_dir"
def start(self):
    """Register every HTTP route exposed by this plugin."""
    http_server = kvmagent.get_http_server()

    http_server.register_async_uri(self.INIT_PATH, self.init)
    http_server.register_async_uri(self.GET_PHYSICAL_CAPACITY_PATH, self.get_physical_capacity)
    http_server.register_async_uri(self.CREATE_EMPTY_VOLUME_PATH, self.create_empty_volume)
    http_server.register_async_uri(self.CREATE_VOLUME_FROM_CACHE_PATH, self.create_root_volume_from_template)
    http_server.register_async_uri(self.DELETE_BITS_PATH, self.delete)
    http_server.register_async_uri(self.DELETE_DIR_PATH, self.deletedir)
    http_server.register_async_uri(self.GET_LIST_PATH, self.list)
    http_server.register_async_uri(self.DOWNLOAD_BIT_PATH, self.download_from_sftp)
    http_server.register_async_uri(self.UPLOAD_BIT_PATH, self.upload_to_sftp)
    http_server.register_async_uri(self.UPLOAD_TO_IMAGESTORE_PATH, self.upload_to_imagestore)
    http_server.register_async_uri(self.COMMIT_TO_IMAGESTORE_PATH, self.commit_to_imagestore)
    http_server.register_async_uri(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_from_imagestore)
    http_server.register_async_uri(self.REVERT_SNAPSHOT_PATH, self.revert_snapshot)
    http_server.register_async_uri(self.MERGE_SNAPSHOT_PATH, self.merge_snapshot)
    http_server.register_async_uri(self.MERGE_AND_REBASE_SNAPSHOT_PATH, self.merge_and_rebase_snapshot)
    http_server.register_async_uri(self.OFFLINE_MERGE_PATH, self.offline_merge_snapshot)
    http_server.register_async_uri(self.CREATE_TEMPLATE_FROM_VOLUME, self.create_template_from_volume)
    http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
    http_server.register_async_uri(self.REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH, self.rebase_root_volume_to_backing_file)
    http_server.register_async_uri(self.VERIFY_SNAPSHOT_CHAIN_PATH, self.verify_backing_file_chain)
    http_server.register_async_uri(self.REBASE_SNAPSHOT_BACKING_FILES_PATH, self.rebase_backing_files)
    http_server.register_async_uri(self.COPY_TO_REMOTE_BITS_PATH, self.copy_bits_to_remote)
    http_server.register_async_uri(self.GET_MD5_PATH, self.get_md5)
    http_server.register_async_uri(self.CHECK_MD5_PATH, self.check_md5)
    http_server.register_async_uri(self.GET_BACKING_FILE_PATH, self.get_backing_file_path)
    http_server.register_async_uri(self.GET_VOLUME_SIZE, self.get_volume_size)
    http_server.register_async_uri(self.GET_BASE_IMAGE_PATH, self.get_volume_base_image_path)
    http_server.register_async_uri(self.GET_QCOW2_REFERENCE, self.get_qcow2_reference)
    http_server.register_async_uri(self.CONVERT_QCOW2_TO_RAW, self.convert_qcow2_to_raw)
    http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)

    self.imagestore_client = ImageStoreClient()
def stop(self):
    # Nothing to tear down; all resources are managed per-request.
    pass
@kvmagent.replyerror
def resize_volume(self, req):
    """Grow the qcow2 at ``installPath`` to ``size`` bytes and report
    the resulting virtual size."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = ResizeVolumeRsp()
    path = cmd.installPath
    shell.call("qemu-img resize %s %s" % (path, cmd.size))
    rsp.size = linux.qcow2_virtualsize(path)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def convert_qcow2_to_raw(self, req):
    """Delegate qcow2 -> raw conversion to the image-store client."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    return self.imagestore_client.convert_image_raw(body)
@kvmagent.replyerror
def get_qcow2_reference(self, req):
    """List regular files under ``searchingDir`` whose qcow2 backing
    file is exactly ``path``."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    listing = shell.call('find %s -type f' % cmd.searchingDir)
    rsp = GetQCOW2ReferenceRsp()
    rsp.referencePaths = [
        f for f in (line.strip(' \t\r\n') for line in listing.split('\n'))
        if f and linux.qcow2_get_backing_file(f) == cmd.path
    ]
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_size(self, req):
    """Report virtual and actual (on-disk) size of a qcow2 volume."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = GetVolumeSizeRsp()
    sizes = linux.qcow2_size_and_actual_size(body.installPath)
    rsp.size, rsp.actualSize = sizes
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_base_image_path(self, req):
    """Walk the backing chain of ``installPath`` down to its base image."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = GetVolumeBaseImagePathRsp()
    rsp.path = linux.get_qcow2_base_image_path_recusively(body.installPath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_backing_file_path(self, req):
    """Return the immediate backing file of ``path`` (and its size),
    if the image has one."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = GetBackingFileRsp()
    backing = linux.qcow2_get_backing_file(cmd.path)
    if backing:
        rsp.backingFilePath = backing
        rsp.size = os.path.getsize(backing)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_md5(self, req):
    """Compute md5 sums for a list of files, streaming progress reports
    to the management server while ``pv`` pipes each file to md5sum."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = GetMd5Rsp()
    rsp.md5s = []
    if cmd.sendCommandUrl:
        Report.url = cmd.sendCommandUrl
    report = Report(cmd.threadContext, cmd.threadContextStack)
    report.processType = "LocalStorageMigrateVolume"
    # pv(1) writes its percentage to PFILE; we tail it for progress.
    PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
    total = 0
    written = 0
    for to in cmd.md5s:
        total = total + os.path.getsize(to.path)
    start = 0
    end = 10
    if cmd.stage:
        start, end = get_scale(cmd.stage)

    def _get_progress(synced):
        # NOTE(review): reads ``written`` and the loop variable ``to``
        # from the enclosing scope at call time (late binding).
        logger.debug("getProgress in get_md5")
        if not os.path.exists(PFILE):
            return synced
        last = shell.call('tail -1 %s' % PFILE).strip()
        if not last or not last.isdigit():
            return synced
        # Scale completed files plus the current file's pv percentage
        # into the [start, end] progress window.
        percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
        report.progress_report(str(percent), "report")
        return synced

    report.resourceUuid = cmd.volumeUuid
    if start == 0:
        report.progress_report("0", "start")
    else:
        report.progress_report(str(start), "report")
    for to in cmd.md5s:
        _, md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
        rsp.md5s.append({
            'resourceUuid': to.resourceUuid,
            'path': to.path,
            'md5': md5
        })
        written += os.path.getsize(to.path)
        percent = int(round(float(written) / float(total) * (end - start) + start))
        report.progress_report(percent, "report")
    if os.path.exists(PFILE):
        os.remove(PFILE)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_md5(self, req):
    """Recompute the md5 of each file and compare against the value
    recorded by the source host; raises on any mismatch."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    if cmd.sendCommandUrl:
        Report.url = cmd.sendCommandUrl
    report = Report(cmd.threadContext, cmd.threadContextStack)
    report.processType = "LocalStorageMigrateVolume"
    # pv(1) writes its percentage to PFILE; we tail it for progress.
    PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
    total = 0
    written = 0
    start = 90
    end = 100
    if cmd.stage:
        start, end = get_scale(cmd.stage)
    for to in cmd.md5s:
        total = total + os.path.getsize(to.path)

    def _get_progress(synced):
        # NOTE(review): reads ``written`` and the loop variable ``to``
        # from the enclosing scope at call time (late binding).
        logger.debug("getProgress in check_md5")
        if not os.path.exists(PFILE):
            return synced
        last = shell.call('tail -1 %s' % PFILE).strip()
        if not last or not last.isdigit():
            return synced
        percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
        report.progress_report(percent, "report")
        return synced

    report.resourceUuid = cmd.volumeUuid
    for to in cmd.md5s:
        _, dst_md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
        if dst_md5 != to.md5:
            raise Exception("MD5 unmatch. The file[uuid:%s, path:%s]'s md5 (src host:%s, dst host:%s)" %
                            (to.resourceUuid, to.path, to.md5, dst_md5))
        written += os.path.getsize(to.path)
        percent = int(round(float(written) / float(total) * (end - start) + start))
        report.progress_report(percent, "report")
    if os.path.exists(PFILE):
        os.remove(PFILE)
    rsp = AgentResponse()
    if end == 100:
        report.progress_report("100", "finish")
    else:
        report.progress_report(str(end), "report")
    return jsonobject.dumps(rsp)
@staticmethod
def _get_disk_capacity(path):
    """Return (total, available) bytes of the filesystem holding ``path``."""
    if not path:
        raise Exception('storage path cannot be None')
    return linux.get_disk_capacity_by_df(path)
@kvmagent.replyerror
@in_bash
def copy_bits_to_remote(self, req):
    """Copy whole qcow2 backing chains to a remote host via rsync,
    reporting migration progress along the way.

    Only root destination accounts are supported; raises otherwise.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    # Full set of files to transfer: every image in every backing chain.
    chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
    if cmd.sendCommandUrl:
        Report.url = cmd.sendCommandUrl
    report = Report(cmd.threadContext, cmd.threadContextStack)
    report.processType = "LocalStorageMigrateVolume"
    report.resourceUuid = cmd.uuid
    # rsync's stdout is redirected to PFILE; the progress callback tails it.
    PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
    start = 10
    end = 90
    if cmd.stage:
        start, end = get_scale(cmd.stage)
    total = 0
    for path in set(chain):
        total = total + os.path.getsize(path)
    written = 0

    def _get_progress(synced):
        logger.debug("getProgress in localstorage-agent, synced: %s, total: %s" % (synced, total))
        if not os.path.exists(PFILE):
            return synced
        fpread = open(PFILE, 'r')
        # BUGFIX: the original returned without closing fpread on the
        # non-digit path, leaking a file handle per poll; close in all
        # paths via try/finally.
        try:
            lines = fpread.readlines()
            if not lines:
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                return synced
            line = last.split()[0]
            if not line.isdigit():
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(round(float(written + synced) / float(total) * (end - start) + start))
                    report.progress_report(percent, "report")
                    synced = written
            return synced
        finally:
            fpread.close()

    for path in set(chain):
        # Upper-case locals are substituted into the bash templates below.
        PATH = path
        PASSWORD = cmd.dstPassword
        USER = cmd.dstUsername
        IP = cmd.dstIp
        PORT = (cmd.dstPort and cmd.dstPort or "22")
        DIR = os.path.dirname(path)
        if cmd.dstUsername == 'root':
            _, _, err = bash_progress_1(
                'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -p "{{PASSWORD}}" ssh -o StrictHostKeyChecking=no -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}', _get_progress)
            if err:
                raise err
        else:
            raise Exception("cannot support migrate to non-root user host")
        written += os.path.getsize(path)
        # Force the copied file onto the destination disk.
        bash_errorout('/usr/bin/sshpass -p "{{PASSWORD}}" ssh -o StrictHostKeyChecking=no -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"')
        percent = int(round(float(written) / float(total) * (end - start) + start))
        report.progress_report(percent, "report")
    if os.path.exists(PFILE):
        os.remove(PFILE)
    rsp = AgentResponse()
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def verify_backing_file_chain(self, req):
    """Validate that every snapshot file exists and points at exactly
    the backing file recorded for it."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    for sp in cmd.snapshots:
        if not os.path.exists(sp.path):
            raise Exception('cannot find the file[%s]' % sp.path)
        if not sp.parentPath:
            continue
        if not os.path.exists(sp.parentPath):
            raise Exception('cannot find the backing file[%s]' % sp.parentPath)
        actual = linux.qcow2_get_backing_file(sp.path)
        if actual != sp.parentPath:
            raise Exception("resource[Snapshot or Volume, uuid:%s, path:%s]'s backing file[%s] is not equal to %s" %
                            (sp.snapshotUuid, sp.path, actual, sp.parentPath))
    return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def rebase_backing_files(self, req):
    """Rewrite each snapshot's backing-file pointer to its recorded parent."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    with_parent = [sp for sp in body.snapshots if sp.parentPath]
    for sp in with_parent:
        linux.qcow2_rebase_no_check(sp.parentPath, sp.path)
    return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def check_bits(self, req):
    """Tell whether the given path exists on this host."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = CheckBitsRsp()
    rsp.existing = os.path.exists(body.path)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_template_from_volume(self, req):
    """Flatten a volume into a standalone template image at ``installPath``."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    dirname = os.path.dirname(cmd.installPath)
    if not os.path.exists(dirname):
        os.makedirs(dirname, 0755)

    linux.create_template(cmd.volumePath, cmd.installPath)

    logger.debug('successfully created template[%s] from volume[%s]' % (cmd.installPath, cmd.volumePath))
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def revert_snapshot(self, req):
    """Create a fresh volume backed by the given snapshot; report its
    path and virtual size."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = RevertVolumeFromSnapshotRsp()
    snapshot_path = cmd.snapshotInstallPath
    new_volume_path = os.path.join(
        os.path.dirname(snapshot_path), '{0}.qcow2'.format(uuidhelper.uuid()))
    linux.qcow2_clone(snapshot_path, new_volume_path)
    rsp.newVolumeInstallPath = new_volume_path
    rsp.size = linux.qcow2_virtualsize(new_volume_path)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_snapshot(self, req):
    """Collapse a snapshot chain into a standalone image in the workspace."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MergeSnapshotRsp()
    dest_dir = os.path.dirname(cmd.workspaceInstallPath)
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    linux.create_template(cmd.snapshotInstallPath, cmd.workspaceInstallPath)
    rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_and_rebase_snapshot(self, req):
    """Re-chain the given snapshots onto their successors, then collapse
    the newest one into a standalone workspace image."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    snapshots = cmd.snapshotInstallPaths
    # Rebase each snapshot onto the next entry in the list.
    for target, backing_file in zip(snapshots, snapshots[1:]):
        linux.qcow2_rebase_no_check(backing_file, target)

    latest = snapshots[0]
    rsp = RebaseAndMergeSnapshotsRsp()
    workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
    if not os.path.exists(workspace_dir):
        os.makedirs(workspace_dir)

    linux.create_template(latest, cmd.workspaceInstallPath)
    rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def offline_merge_snapshot(self, req):
    """Merge a snapshot while the VM is offline: rebase src onto dest,
    or fully flatten dest when ``fullRebase`` is set."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    if cmd.fullRebase:
        # Flatten into a temporary file, then swap it over dest.
        tmp = os.path.join(os.path.dirname(cmd.destPath), '%s.qcow2' % uuidhelper.uuid())
        linux.create_template(cmd.destPath, tmp)
        shell.call("mv %s %s" % (tmp, cmd.destPath))
    else:
        linux.qcow2_rebase(cmd.srcPath, cmd.destPath)
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_physical_capacity(self, req):
    """Report total/available bytes of the storage mount."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(body.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def rebase_root_volume_to_backing_file(self, req):
    """Point a root volume at a new backing file without data checks."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    linux.qcow2_rebase_no_check(body.backingFilePath, body.rootVolumePath)
    return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def init(self, req):
    """Prepare the storage directory and report its initial capacity."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    if not os.path.exists(cmd.path):
        os.makedirs(cmd.path, 0755)
    rsp = AgentResponse()
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.path)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_empty_volume(self, req):
    """Create an empty qcow2 volume, optionally on a backing file;
    failures are reported in the response rather than raised."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    try:
        dirname = os.path.dirname(cmd.installUrl)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if cmd.backingFile:
            linux.qcow2_create_with_backing_file(cmd.backingFile, cmd.installUrl)
        else:
            linux.qcow2_create(cmd.installUrl, cmd.size)
    except Exception as e:
        logger.warn(linux.get_exception_stacktrace())
        rsp.error = 'unable to create empty volume[uuid:%s, name:%s], %s' % (cmd.uuid, cmd.name, str(e))
        rsp.success = False
        return jsonobject.dumps(rsp)

    logger.debug('successfully create empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installUrl))
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_root_volume_from_template(self, req):
    """Clone a cached template into a new root volume."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    if not os.path.exists(cmd.templatePathInCache):
        rsp.error = "unable to find image in cache"
        rsp.success = False
        logger.debug('error: %s: %s' % (rsp.error, cmd.templatePathInCache))
        return jsonobject.dumps(rsp)

    dirname = os.path.dirname(cmd.installUrl)
    if not os.path.exists(dirname):
        os.makedirs(dirname, 0775)

    linux.qcow2_clone(cmd.templatePathInCache, cmd.installUrl)
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete(self, req):
    """Delete the image at ``path`` and report remaining capacity."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    kvmagent.deleteImage(body.path)
    rsp = AgentResponse()
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(body.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def list(self, req):
    """List entries under ``path``."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = ListResponse()
    rsp.paths = kvmagent.listPath(body.path)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def deletedir(self, req):
    """Recursively remove a directory tree and report remaining capacity."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    shell.call('rm -rf %s' % body.path)
    logger.debug('successfully delete %s' % body.path)
    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(body.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_sftp(self, req):
    """Upload a local file to SFTP backup storage via scp; errors are
    reported in the response."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    try:
        if not os.path.exists(cmd.primaryStorageInstallPath):
            raise kvmagent.KvmError('cannot find %s' % cmd.primaryStorageInstallPath)
        linux.scp_upload(cmd.hostname, cmd.sshKey, cmd.primaryStorageInstallPath, cmd.backupStorageInstallPath, cmd.username, cmd.sshPort)
    except kvmagent.KvmError as e:
        logger.warn(linux.get_exception_stacktrace())
        rsp.error = str(e)
        rsp.success = False
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_imagestore(self, req):
    """Delegate image upload to the image-store client."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    return self.imagestore_client.upload_to_imagestore(cmd, req)
@kvmagent.replyerror
def commit_to_imagestore(self, req):
    """Delegate image commit to the image-store client."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    return self.imagestore_client.commit_to_imagestore(cmd, req)
@kvmagent.replyerror
def download_from_sftp(self, req):
    """Download a file from SFTP backup storage via scp; errors are
    reported in the response rather than raised."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    try:
        linux.scp_download(cmd.hostname, cmd.sshKey, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath, cmd.username, cmd.sshPort)
        logger.debug('successfully download %s/%s to %s' % (cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath))
    except Exception as e:
        content = traceback.format_exc()
        logger.warn(content)
        err = "unable to download %s/%s, because %s" % (cmd.hostname, cmd.backupStorageInstallPath, str(e))
        rsp.error = err
        rsp.success = False

    rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
    return jsonobject.dumps(rsp)
@kvmagent.replyerror
def download_from_imagestore(self, req):
    """Fetch an image from the image store into primary storage."""
    body = jsonobject.loads(req[http.REQUEST_BODY])
    self.imagestore_client.download_from_imagestore(
        body.storagePath, body.hostname,
        body.backupStorageInstallPath, body.primaryStorageInstallPath)
    reply = AgentResponse()
    reply.totalCapacity, reply.availableCapacity = self._get_disk_capacity(body.storagePath)
    return jsonobject.dumps(reply)
| |
#-*- python -*-
import logging
from datetime import datetime
import urllib2
# Non-stdlib imports
import pkg_resources
import pymongo
from tg import expose, validate, redirect, flash
from tg.decorators import with_trailing_slash, without_trailing_slash
from pylons import g, c, request, response
import formencode
from formencode import validators
from webob import exc
from ming.orm import session
# Pyforge-specific imports
from allura.app import Application, ConfigOption, SitemapEntry
from allura.app import DefaultAdminController
from allura.lib import helpers as h
from allura.lib.search import search
from allura.lib.decorators import require_post, Property
from allura.lib.security import has_access, require_access
from allura.lib import widgets as w
from allura.lib.widgets.subscriptions import SubscribeForm
from allura.lib.widgets import form_fields as ffw
from allura import model as M
from allura.controllers import BaseController, AppDiscussionController
# Local imports
from forgeblog import model as BM
from forgeblog import version
from forgeblog import widgets
log = logging.getLogger(__name__)
class W:
    """Container namespace for the widget instances shared by this tool's
    controllers (discussion thread, pager, post forms, attachments,
    subscription form)."""
    thread=w.Thread(
        page=None, limit=None, page_size=None, count=None,
        style='linear')
    pager = widgets.BlogPager()
    new_post_form = widgets.NewPostForm()
    edit_post_form = widgets.EditPostForm()
    view_post_form = widgets.ViewPostForm()
    label_edit = ffw.LabelEdit()
    attachment_add = ffw.AttachmentAdd()
    attachment_list = ffw.AttachmentList()
    preview_post_form = widgets.PreviewPostForm()
    subscribe_form = SubscribeForm()
class ForgeBlogApp(Application):
    """Allura tool application providing a per-project blog."""
    __version__ = version.__version__
    tool_label='Blog'
    default_mount_label='Blog'
    default_mount_point='blog'
    permissions = ['configure', 'read', 'write',
                   'unmoderated_post', 'post', 'moderate', 'admin']
    ordinal=14
    installable=True
    config_options = Application.config_options
    default_external_feeds = []
    icons={
        24:'images/blog_24.png',
        32:'images/blog_32.png',
        48:'images/blog_48.png'
    }

    def __init__(self, project, config):
        Application.__init__(self, project, config)
        self.root = RootController()
        self.admin = BlogAdminController(self)

    @Property
    def external_feeds_list():
        # allura's Property decorator builds a property from the fget/fset
        # functions defined in this local namespace.
        def fget(self):
            # Fall back to the class default when no Globals document
            # exists yet for this app config.
            globals = BM.Globals.query.get(app_config_id=self.config._id)
            if globals is not None:
                external_feeds = globals.external_feeds
            else:
                external_feeds = self.default_external_feeds
            return external_feeds
        def fset(self, new_external_feeds):
            globals = BM.Globals.query.get(app_config_id=self.config._id)
            if globals is not None:
                globals.external_feeds = new_external_feeds
            elif len(new_external_feeds) > 0:
                # Create the Globals document lazily on first non-empty set.
                globals = BM.Globals(app_config_id=self.config._id, external_feeds=new_external_feeds)
            if globals is not None:
                session(globals).flush()

    def main_menu(self):
        """Entries to be displayed in the main project nav."""
        return [SitemapEntry(self.config.options.mount_label.title(), '.')]

    @property
    @h.exceptionless([], log)
    def sitemap(self):
        # Top-level sitemap entry with this tool's sidebar links nested
        # under it; exceptionless returns [] on any failure.
        menu_id = self.config.options.mount_label.title()
        with h.push_config(c, app=self):
            return [
                SitemapEntry(menu_id, '.')[self.sidebar_menu()] ]

    @property
    def show_discussion(self):
        # The option may be absent on apps installed before it existed;
        # default to enabled in that case.
        if 'show_discussion' in self.config.options:
            return self.config.options['show_discussion']
        else:
            return True

    @h.exceptionless([], log)
    def sidebar_menu(self):
        """Sidebar links; 'New Post' only for users with write access."""
        base = c.app.url
        links = [
            SitemapEntry('Home', base),
            SitemapEntry('Search', base + 'search'),
        ]
        if has_access(self, 'write')():
            links += [ SitemapEntry('New Post', base + 'new') ]
        return links

    def admin_menu(self):
        """Admin sidebar links for this tool instance."""
        admin_url = c.project.url() + 'admin/' + self.config.options.mount_point + '/'
        # temporarily disabled until some bugs are fixed
        links = [SitemapEntry('External feeds', admin_url + 'exfeed', className='admin_modal')]
        links += super(ForgeBlogApp, self).admin_menu(force_options=True)
        return links
        #return super(ForgeBlogApp, self).admin_menu(force_options=True)

    def install(self, project):
        'Set up any default permissions and roles here'
        super(ForgeBlogApp, self).install(project)
        # Setup permissions
        role_admin = M.ProjectRole.by_name('Admin')._id
        role_developer = M.ProjectRole.by_name('Developer')._id
        role_auth = M.ProjectRole.by_name('*authenticated')._id
        role_anon = M.ProjectRole.by_name('*anonymous')._id
        self.config.acl = [
            M.ACE.allow(role_anon, 'read'),
            M.ACE.allow(role_auth, 'post'),
            M.ACE.allow(role_auth, 'unmoderated_post'),
            M.ACE.allow(role_developer, 'write'),
            M.ACE.allow(role_developer, 'moderate'),
            M.ACE.allow(role_admin, 'configure'),
            M.ACE.allow(role_admin, 'admin'),
        ]

    def uninstall(self, project):
        "Remove all the tool's artifacts from the database"
        BM.Attachment.query.remove(dict(app_config_id=c.app.config._id))
        BM.BlogPost.query.remove(dict(app_config_id=c.app.config._id))
        BM.BlogPostSnapshot.query.remove(dict(app_config_id=c.app.config._id))
        super(ForgeBlogApp, self).uninstall(project)
class RootController(BaseController):
    """Public controller for the blog tool: index, search, new-post,
    feeds, and per-post URL dispatch."""

    def __init__(self):
        # Expose the feed endpoint under both 'feed.atom' and 'feed.rss'
        # URL names (dots are not valid in attribute-access syntax).
        setattr(self, 'feed.atom', self.feed)
        setattr(self, 'feed.rss', self.feed)
        self._discuss = AppDiscussionController()

    def _check_security(self):
        # Every request to this controller requires read access.
        require_access(c.app, 'read')

    @expose('jinja:forgeblog:templates/blog/index.html')
    @with_trailing_slash
    def index(self, page=0, limit=10, **kw):
        """Paginated list of posts; drafts are visible only to writers."""
        query_filter = dict(app_config_id=c.app.config._id)
        if not has_access(c.app, 'write')():
            query_filter['state'] = 'published'
        q = BM.BlogPost.query.find(query_filter)
        post_count = q.count()
        limit, page = h.paging_sanitizer(limit, page, post_count)
        posts = q.sort('timestamp', pymongo.DESCENDING) \
                 .skip(page * limit).limit(limit)
        c.form = W.preview_post_form
        c.pager = W.pager
        return dict(posts=posts, page=page, limit=limit, count=post_count)

    @expose('jinja:forgeblog:templates/blog/search.html')
    @validate(dict(q=validators.UnicodeString(if_empty=None),
                   history=validators.StringBool(if_empty=False)))
    def search(self, q=None, history=None, **kw):
        'local tool search'
        results = []
        count=0
        if not q:
            q = ''
        else:
            # Restrict the search to published posts of this tool instance.
            results = search(
                q,
                fq=[
                    'state_s:published',
                    'is_history_b:%s' % history,
                    'project_id_s:%s' % c.project._id,
                    'mount_point_s:%s'% c.app.config.options.mount_point ])
            if results: count=results.hits
        return dict(q=q, history=history, results=results or [], count=count)

    @expose('jinja:forgeblog:templates/blog/edit_post.html')
    @without_trailing_slash
    def new(self, **kw):
        """Render the new-post form with a default 'published' state."""
        require_access(c.app, 'write')
        now = datetime.utcnow()  # NOTE(review): unused -- confirm and drop
        post = dict(
            state='published')
        c.form = W.new_post_form
        return dict(post=post)

    @expose()
    @require_post()
    @validate(form=W.edit_post_form, error_handler=new)
    @without_trailing_slash
    def save(self, **kw):
        """Create a blog post from the submitted form, start its
        discussion thread, and redirect to the new post."""
        require_access(c.app, 'write')
        post = BM.BlogPost()
        for k,v in kw.iteritems():
            setattr(post, k, v)
        post.neighborhood_id=c.project.neighborhood_id
        post.make_slug()
        post.commit()
        M.Thread.new(discussion_id=post.app_config.discussion_id,
                     ref_id=post.index_id(),
                     subject='%s discussion' % post.title)
        redirect(h.really_unicode(post.url()).encode('utf-8'))

    @without_trailing_slash
    @expose()
    @validate(dict(
        since=h.DateTimeConverter(if_empty=None, if_invalid=None),
        until=h.DateTimeConverter(if_empty=None, if_invalid=None),
        offset=validators.Int(if_empty=None),
        limit=validators.Int(if_empty=None)))
    def feed(self, since=None, until=None, offset=None, limit=None):
        """RSS/Atom feed of blog activity; format chosen by URL suffix."""
        if request.environ['PATH_INFO'].endswith('.atom'):
            feed_type = 'atom'
        else:
            feed_type = 'rss'
        title = '%s - %s' % (c.project.name, c.app.config.options.mount_label)
        feed = M.Feed.feed(
            dict(project_id=c.project._id, app_config_id=c.app.config._id),
            feed_type,
            title,
            c.app.url,
            title,
            since, until, offset, limit)
        # Clear any previously-set header before forcing the XML type.
        response.headers['Content-Type'] = ''
        response.content_type = 'application/xml'
        return feed.writeString('utf-8')

    @with_trailing_slash
    @expose('jinja:allura:templates/markdown_syntax_dialog.html')
    def markdown_syntax_dialog(self):
        'Static dialog page about how to use markdown.'
        return dict()

    @expose()
    def _lookup(self, year, month, name, *rest):
        """Dispatch /YYYY/MM/slug URLs to a PostController; 404 when the
        slug does not match a post of this tool instance."""
        slug = '/'.join((year, month, urllib2.unquote(name).decode('utf-8')))
        post = BM.BlogPost.query.get(slug=slug, app_config_id=c.app.config._id)
        if post is None:
            raise exc.HTTPNotFound()
        return PostController(post), rest
class PostController(BaseController):
    """Controller for a single blog post: view, edit, history, diff,
    revert, subscriptions, and feeds."""

    def __init__(self, post):
        self.post = post
        # Expose the feed under both extensions, as in RootController.
        setattr(self, 'feed.atom', self.feed)
        setattr(self, 'feed.rss', self.feed)

    def _check_security(self):
        require_access(self.post, 'read')

    @expose('jinja:forgeblog:templates/blog/post.html')
    @with_trailing_slash
    @validate(dict(page=validators.Int(if_empty=0),
                   limit=validators.Int(if_empty=25)))
    def index(self, page=0, limit=25, **kw):
        """Show the post (optionally an older version via ?version=N)
        together with its paginated discussion thread."""
        if self.post.state == 'draft':
            # Drafts are only visible to users with write access.
            require_access(self.post, 'write')
        c.form = W.view_post_form
        c.subscribe_form = W.subscribe_form
        c.thread = W.thread
        post_count = self.post.discussion_thread.post_count
        limit, page = h.paging_sanitizer(limit, page, post_count)
        version = kw.pop('version', None)
        post = self._get_version(version)
        base_post = self.post
        return dict(post=post, base_post=base_post,
                    page=page, limit=limit, count=post_count)

    @expose('jinja:forgeblog:templates/blog/edit_post.html')
    @without_trailing_slash
    def edit(self, **kw):
        """Render the edit form for this post."""
        require_access(self.post, 'write')
        c.form = W.edit_post_form
        c.attachment_add = W.attachment_add
        c.attachment_list = W.attachment_list
        c.label_edit = W.label_edit
        return dict(post=self.post)

    @without_trailing_slash
    @expose('jinja:forgeblog:templates/blog/post_history.html')
    def history(self):
        """List all saved versions of this post."""
        posts = self.post.history()
        return dict(title=self.post.title, posts=posts)

    @without_trailing_slash
    @expose('jinja:forgeblog:templates/blog/post_diff.html')
    def diff(self, v1, v2):
        """Show a text diff between two versions of the post."""
        p1 = self._get_version(int(v1))
        p2 = self._get_version(int(v2))
        result = h.diff_text(p1.text, p2.text)
        return dict(p1=p1, p2=p2, edits=result)

    @expose()
    @require_post()
    @validate(form=W.edit_post_form, error_handler=edit)
    @without_trailing_slash
    def save(self, delete=None, **kw):
        """Apply the edit form; a truthy `delete` removes the post
        instead (redirect ends the request before the update path)."""
        require_access(self.post, 'write')
        if delete:
            self.post.delete()
            flash('Post deleted', 'info')
            redirect(h.really_unicode(c.app.url).encode('utf-8'))
        for k,v in kw.iteritems():
            setattr(self.post, k, v)
        self.post.commit()
        redirect('.')

    @without_trailing_slash
    @require_post()
    @expose()
    def revert(self, version):
        """Restore the text of an earlier version as a new commit."""
        require_access(self.post, 'write')
        orig = self._get_version(version)
        if orig:
            self.post.text = orig.text
        self.post.commit()
        redirect('.')

    @expose()
    @validate(W.subscribe_form)
    def subscribe(self, subscribe=None, unsubscribe=None):
        """Toggle the current user's subscription to this post, then
        return to the referring page."""
        if subscribe:
            self.post.subscribe(type='direct')
        elif unsubscribe:
            self.post.unsubscribe()
        redirect(h.really_unicode(request.referer).encode('utf-8'))

    @without_trailing_slash
    @expose()
    @validate(dict(
        since=h.DateTimeConverter(if_empty=None, if_invalid=None),
        until=h.DateTimeConverter(if_empty=None, if_invalid=None),
        offset=validators.Int(if_empty=None),
        limit=validators.Int(if_empty=None)))
    def feed(self, since=None, until=None, offset=None, limit=None):
        """RSS/Atom feed of changes to this post."""
        if request.environ['PATH_INFO'].endswith('.atom'):
            feed_type = 'atom'
        else:
            feed_type = 'rss'
        feed = M.Feed.feed(
            dict(ref_id=self.post.index_id()),
            feed_type,
            'Recent changes to %s' % self.post.title,
            self.post.url(),
            'Recent changes to %s' % self.post.title,
            since, until, offset, limit)
        # Clear any previously-set header before forcing the XML type.
        response.headers['Content-Type'] = ''
        response.content_type = 'application/xml'
        return feed.writeString('utf-8')

    def _get_version(self, version):
        """Return the requested version of the post, the live post when
        `version` is falsy, or raise 404 for an unknown version."""
        if not version: return self.post
        try:
            return self.post.get_version(version)
        except ValueError:
            raise exc.HTTPNotFound()
class BlogAdminController(DefaultAdminController):
    """Admin UI for the blog tool: basic options and external feed list."""

    def __init__(self, app):
        self.app = app

    @without_trailing_slash
    @expose('jinja:forgeblog:templates/blog/admin_options.html')
    def options(self):
        """Render the tool options form."""
        return dict(app=self.app,
                    allow_config=has_access(self.app, 'configure')())

    @without_trailing_slash
    @expose()
    @require_post()
    def set_options(self, show_discussion=False):
        """Persist the options form; the checkbox value is coerced to bool."""
        self.app.config.options['show_discussion'] = show_discussion and True or False
        flash('Blog options updated')
        redirect(h.really_unicode(c.project.url()+'admin/tools').encode('utf-8'))

    @without_trailing_slash
    @expose('jinja:forgeblog:templates/blog/admin_exfeed.html')
    def exfeed(self):
        """Render the external-feeds management form."""
        feeds_list = list(self.app.external_feeds_list)
        return dict(app=self.app,
                    feeds_list=feeds_list,
                    allow_config=has_access(self.app, 'configure')())

    @without_trailing_slash
    @expose()
    @require_post()
    def set_exfeed(self, new_exfeed=None, **kw):
        """Validate and store the submitted external feed URLs.

        Accepts the existing feed fields (``exfeed``) plus an optional new
        entry; each value must parse as a URL or it is reported back to the
        user as invalid.
        """
        exfeed_val = kw.get('exfeed', [])
        # A single form field arrives as a string rather than a list.  Use
        # isinstance with basestring so both str and unicode are handled:
        # the old ``type(exfeed_val) == unicode`` check let a plain byte
        # str fall through and be iterated character by character.
        if isinstance(exfeed_val, basestring):
            tmp_exfeed_list = [exfeed_val]
        else:
            tmp_exfeed_list = list(exfeed_val)
        if new_exfeed is not None and new_exfeed != '':
            tmp_exfeed_list.append(new_exfeed)

        exfeed_list = []
        invalid_list = []
        v = validators.URL()
        for link in tmp_exfeed_list:
            try:
                v.to_python(link)
                exfeed_list.append(link)
            except formencode.api.Invalid:
                invalid_list.append(link)

        self.app.external_feeds_list = exfeed_list
        flash('External feeds updated')
        if len(invalid_list) > 0:
            flash('Invalid link(s): %s' % ','.join(link for link in invalid_list), 'error')
        redirect(c.project.url()+'admin/tools')
| |
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 19:35:04 2015
@author: ajaver
"""
import os
import sys
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.params import head_tail_defaults
from tierpsy.analysis.ske_orient.WormClass import WormClass
from tierpsy.helper.misc import TimeCounter, print_flush
def getAnglesDelta(dx, dy):
    '''
    Compute the angle of each (dx, dy) vector, unwrap any +/-2pi jumps
    between consecutive frames, and centre the series on zero.

    Returns (centred_angles, mean_angle_removed).
    '''
    angles = np.arctan2(dx, dy)
    deltas = np.diff(angles)

    # A delta beyond +pi means the raw angle wrapped downward (subtract
    # 2pi from everything after it); beyond -pi, upward (add 2pi).  The
    # per-frame offsets accumulate, so apply them as a cumulative sum
    # shifted by one (the first frame never needs correction).
    corrections = np.zeros_like(angles)
    corrections[1:] = np.cumsum(
        -2 * np.pi * (deltas > np.pi) + 2 * np.pi * (deltas < -np.pi))
    angles = angles + corrections

    # Rotate so the mean orientation is zero.
    meanAngle = np.mean(angles)
    angles = angles - meanAngle
    return angles, meanAngle
def calculateHeadTailAng(skeletons, segment4angle, good):
    '''
    For each skeleton compute two angles: the orientation of the vector
    from point 0 to point segment4angle (the 'head' end), and of the
    vector from point -1 to point -segment4angle-1 (the 'tail' end).
    Frames not selected by the boolean mask `good` are left as NaN.
    '''
    n_frames = skeletons.shape[0]
    angles_head = np.full(n_frames, np.nan)
    angles_tail = np.full(n_frames, np.nan)

    head_delta = skeletons[good, segment4angle, :] - skeletons[good, 0, :]
    angles_head[good], _ = getAnglesDelta(head_delta[:, 0], head_delta[:, 1])

    tail_delta = skeletons[good, -segment4angle - 1, :] - skeletons[good, -1, :]
    angles_tail[good], _ = getAnglesDelta(tail_delta[:, 0], tail_delta[:, 1])

    return angles_head, angles_tail
def getBlocksIDs(invalid, max_gap_allowed):
    '''Label contiguous blocks of skeletonized frames.

    Frames flagged in `invalid` get id 0; valid frames are grouped into
    blocks numbered from 1, and a new block is started whenever the gap
    between consecutive valid frames is >= max_gap_allowed.

    Returns (block_ids, tot_blocks).
    '''
    good_ind = np.where(~invalid)[0]
    # np.int was removed in NumPy 1.24; the builtin int gives the same
    # platform-default integer dtype.
    block_ids = np.zeros(invalid.size, dtype=int)
    if good_ind.size == 0:
        # No valid frame at all -> no blocks (previously this crashed
        # with an IndexError on block_ind[0]).
        return block_ids, 0

    delTs = np.diff(good_ind)
    block_ind = np.zeros_like(good_ind)
    block_ind[0] = 1
    for ii, delT in enumerate(delTs):
        if delT < max_gap_allowed:
            block_ind[ii + 1] = block_ind[ii]
        else:
            block_ind[ii + 1] = block_ind[ii] + 1

    tot_blocks = block_ind[-1]
    block_ids[good_ind] = block_ind
    return block_ids, tot_blocks
def isWormHTSwitched(skeletons, segment4angle=5, max_gap_allowed=10,
                     window_std=25, min_block_size=250):
    '''
    Flag, frame by frame, whether the skeleton appears head/tail switched.

    The skeletons are split into blocks of contiguous data (gaps smaller
    than max_gap_allowed).  For each frame the angles of the 'head' and
    'tail' ends are computed and their rolling standard deviations
    compared: the head is expected to move more than the tail.  A block
    where the tail moves more in the majority of frames is flagged as
    switched.  Blocks smaller than min_block_size inherit the flag of the
    nearest big block.

    Returns (is_switch_skel, roll_std).
    '''
    invalid = np.isnan(skeletons[:, 0, 0])

    # get contiguous skeletons blocks
    block_ids, tot_blocks = getBlocksIDs(invalid, max_gap_allowed)

    # calculate head and tail angles for every frame in a valid block
    angles_head, angles_tail = calculateHeadTailAng(
        skeletons, segment4angle, block_ids != 0)

    # rolling std of each angle series as a proxy for end movement
    ts = pd.DataFrame({'head_angle': angles_head, 'tail_angle': angles_tail})
    roll_win = ts.rolling(window=window_std, min_periods=window_std - max_gap_allowed)
    roll_std = roll_win.std()

    # True where the assumed head end moves more than the tail end
    roll_std["is_head"] = (roll_std['head_angle'] > roll_std['tail_angle'])
    roll_std["block_id"] = block_ids

    def mean_relevant(x):
        # nan for blocks too small to be trusted on their own
        return x.mean() if x.count() > min_block_size else np.nan

    # per-block probability that the assumed head really is the head
    head_prob = roll_std.groupby('block_id').agg({'is_head': mean_relevant})
    head_prob.loc[0] = np.nan

    # Propagate the decision of the nearest big block into the small and
    # invalid ones: ffill first, then bfill for leading gaps.  The
    # fillna(method=...) form is deprecated in recent pandas.
    head_prob = head_prob.ffill().bfill()

    # a block is switched when its head probability is below 0.5
    is_switch_block = np.squeeze(head_prob.values) < 0.5
    is_switch_skel = is_switch_block[block_ids]

    return is_switch_skel, roll_std
def correctHeadTail(skeletons_file, **params):
    '''
    Correct Head Tail orientation using skeleton movement. The head must
    move more than the tail (have a higher rolling standard deviation).
    This might fail if the amount of contiguously skeletonized frames is
    too little (a few seconds). The head must be in the first position of
    the single frame skeleton array, while the tail must be in the last.

    max_gap_allowed - maximum number of consecutive skeletons lost before
        considering it a new block
    window_std - frame window used to calculate the standard deviation
    segment4angle - separation between skeleton segments used to
        calculate the angles
    min_block_size - consider only around 10s intervals to determine if
        it is head or tail...
    '''
    params = head_tail_defaults(skeletons_file, **params)
    max_gap_allowed = params['max_gap_allowed']
    window_std = params['window_std']
    segment4angle = params['segment4angle']
    min_block_size = params['min_block_size']

    # e.g. /dir/exp_1_skeletons.hdf5 -> exp_1 (used only for progress messages)
    base_name = skeletons_file.rpartition(
        '.')[0].rpartition(os.sep)[-1].rpartition('_')[0]

    with pd.HDFStore(skeletons_file, 'r') as ske_file_id:
        indexes_data = ske_file_id[
            '/trajectories_data'][['worm_index_joined', 'skeleton_id']]
        # get the first and last frame of each worm_index
        rows_indexes = indexes_data.groupby(
            'worm_index_joined').agg([min, max])['skeleton_id']
        del indexes_data

    # check if the skeletonization finished succesfully
    with tables.File(skeletons_file, "r") as ske_file_id:
        skeleton_table = ske_file_id.get_node('/skeleton')
        if 'has_finished' in dir(skeleton_table._v_attrs):
            assert skeleton_table._v_attrs['has_finished'] >= 2

    progress_timer = TimeCounter('')
    for ii, dat in enumerate(rows_indexes.iterrows()):
        if ii % 10 == 0:
            # report progress every 10 worms
            dd = " Correcting Head-Tail using worm movement. Worm %i of %i." % (
                ii + 1, len(rows_indexes))
            dd = base_name + dd + ' Total time:' + progress_timer.get_time_str()
            print_flush(dd)

        worm_index, row_range = dat
        worm_data = WormClass(skeletons_file, worm_index,
                              rows_range=(row_range['min'], row_range['max']))

        # skip worms that have no valid skeletons at all
        if not np.all(np.isnan(worm_data.skeleton_length)):
            is_switched_skel, roll_std = isWormHTSwitched(worm_data.skeleton,
                segment4angle=segment4angle, max_gap_allowed=max_gap_allowed,
                window_std=window_std, min_block_size=min_block_size)

            worm_data.switchHeadTail(is_switched_skel)
            worm_data.writeData()
    #%%
    print_flush(
        'Head-Tail correction using worm movement finished:' +
        progress_timer.get_time_str())

    with tables.File(skeletons_file, "r+") as ske_file_id:
        # Mark a succesful termination
        ske_file_id.get_node('/skeleton')._v_attrs['has_finished'] = 3
if __name__ == "__main__":
    # Example usage (fill in a real experiment before running):
    #root_dir = '/Users/ajaver/Desktop/Gecko_compressed/20150511/'
    #base_name = 'Capture_Ch1_11052015_195105'
    #root_dir = '/Users/ajaver/Desktop/Gecko_compressed/20150512/'
    #base_name = 'Capture_Ch3_12052015_194303'
    # The path construction below referenced root_dir/base_name while they
    # were commented out, so running this module raised NameError; keep the
    # whole example commented out for consistency.
    #skeletons_file = root_dir + '/Trajectories/' + base_name + '_skeletons.hdf5'
    # correctHeadTail(skeletons_file, max_gap_allowed = 10, \
    # window_std = 25, segment4angle = 5, min_block_size = 250)
    pass
| |
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import re
from spdx import checksum
from spdx import document
from spdx import version
from spdx.parsers.builderexceptions import CardinalityError
from spdx.parsers.builderexceptions import IncompatibleVersionError
from spdx.parsers.builderexceptions import OrderError
from spdx.parsers.builderexceptions import SPDXValueError
from spdx.parsers import tagvaluebuilders
class DocBuilder(object):
    """Builds the document-level fields (version, data license, comment)
    of an SPDX document parsed from RDF.  Each field may be set exactly
    once per document; reset_document() clears the state."""
    VERS_STR_REGEX = re.compile(r'SPDX-(\d+)\.(\d+)', re.UNICODE)

    def __init__(self):
        super(DocBuilder, self).__init__()
        self.reset_document()

    def set_doc_version(self, doc, value):
        """Set the document version from a 'SPDX-M.m' string.

        Raises SPDXValueError if the value is malformed, CardinalityError
        if the version was already set, and IncompatibleVersionError for
        any version other than 1.2.  Returns True on success.
        """
        if self.doc_version_set:
            raise CardinalityError('Document::Version')
        # Mark the field as seen even if validation fails below, matching
        # the one-attempt-per-document contract.
        self.doc_version_set = True
        m = self.VERS_STR_REGEX.match(value)
        if m is None:
            raise SPDXValueError('Document::Version')
        vers = version.Version(major=int(m.group(1)), minor=int(m.group(2)))
        if vers != version.Version(major=1, minor=2):
            raise IncompatibleVersionError(value)
        doc.version = vers
        return True

    def set_doc_data_lic(self, doc, res):
        """Set the document data license from its resource URI.

        Raises CardinalityError if the license was already set.
        """
        if self.doc_data_lics_set:
            raise CardinalityError('Document::License')
        self.doc_data_lics_set = True
        # str.split always returns at least one element, so taking the
        # last path component is safe (the previous length check could
        # never fail, making its error branch unreachable).
        identifier = res.split('/')[-1]
        doc.data_license = document.License.from_identifier(identifier)

    def set_doc_comment(self, doc, comment):
        """Set the document comment.

        Raises CardinalityError if the comment was already set.
        """
        if self.doc_comment_set:
            raise CardinalityError('Document::Comment')
        self.doc_comment_set = True
        doc.comment = comment

    def reset_document(self):
        """Resets the state to allow building new documents"""
        self.doc_version_set = False
        self.doc_comment_set = False
        self.doc_data_lics_set = False
class EntityBuilder(tagvaluebuilders.EntityBuilder):
    """Entity builder reusing the tag/value regexes and build methods."""

    def create_entity(self, doc, value):
        """Build a Tool, Person or Organization entity from `value`,
        trying each recognizer in that order.
        Raises SPDXValueError when nothing matches.
        """
        recognizers = (
            (self.tool_re, self.build_tool),
            (self.person_re, self.build_person),
            (self.org_re, self.build_org),
        )
        for regex, build in recognizers:
            if regex.match(value):
                return build(doc, value)
        raise SPDXValueError('Entity')
class CreationInfoBuilder(tagvaluebuilders.CreationInfoBuilder):
    """Creation-info builder with a set-once comment field."""

    def set_creation_comment(self, doc, comment):
        """Set the creation-info comment; returns True on success.

        Raises CardinalityError if the comment was already set.
        """
        if self.creation_comment_set:
            raise CardinalityError('CreationInfo::Comment')
        self.creation_comment_set = True
        doc.creation_info.comment = comment
        return True
class PackageBuilder(tagvaluebuilders.PackageBuilder):
    """Package builder with set-once fields; every setter requires that a
    package has already been defined (OrderError otherwise) and raises
    CardinalityError on a second assignment."""

    def set_pkg_chk_sum(self, doc, chk_sum):
        """Sets the package check sum, if not already set.
        chk_sum - A string
        Raises CardinalityError if already defined.
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_chk_sum_set:
            self.package_chk_sum_set = True
            # check sums are always recorded with the SHA1 algorithm
            doc.package.check_sum = checksum.Algorithm('SHA1', chk_sum)
        else:
            raise CardinalityError('Package::CheckSum')

    def set_pkg_source_info(self, doc, text):
        """Sets the package's source information, if not already set.
        text - Free form text.
        Raises CardinalityError if already defined.
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_source_info_set:
            self.package_source_info_set = True
            doc.package.source_info = text
            return True
        else:
            raise CardinalityError('Package::SourceInfo')

    def set_pkg_verif_code(self, doc, code):
        """Sets the package verification code, if not already set.
        code - A string.
        Raises CardinalityError if already defined.
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_verif_set:
            self.package_verif_set = True
            doc.package.verif_code = code
        else:
            raise CardinalityError('Package::VerificationCode')

    def set_pkg_excl_file(self, doc, filename):
        """Sets the package's verification code excluded file.
        May be called multiple times (one entry per excluded file).
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        doc.package.add_exc_file(filename)

    def set_pkg_license_comment(self, doc, text):
        """Sets the package's license comment.
        Raises OrderError if no package previously defined.
        Raises CardinalityError if already set.
        """
        self.assert_package_exists()
        if not self.package_license_comment_set:
            self.package_license_comment_set = True
            doc.package.license_comment = text
            return True
        else:
            raise CardinalityError('Package::LicenseComment')

    def set_pkg_cr_text(self, doc, text):
        """Sets the package's copyright text.
        Raises OrderError if no package previously defined.
        Raises CardinalityError if already set.
        """
        self.assert_package_exists()
        if not self.package_cr_text_set:
            self.package_cr_text_set = True
            doc.package.cr_text = text
        else:
            raise CardinalityError('Package::CopyrightText')

    def set_pkg_summary(self, doc, text):
        """Sets the package summary.
        Raises CardinalityError if summary already set.
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_summary_set:
            self.package_summary_set = True
            doc.package.summary = text
        else:
            raise CardinalityError('Package::Summary')

    def set_pkg_desc(self, doc, text):
        """Sets the package's description.
        Raises CardinalityError if description already set.
        Raises OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_desc_set:
            self.package_desc_set = True
            doc.package.description = text
        else:
            raise CardinalityError('Package::Description')
class FileBuilder(tagvaluebuilders.FileBuilder):
    """File builder with set-once per-file fields; every setter requires
    both a package and a current file (OrderError otherwise) and raises
    CardinalityError on a second assignment."""

    def set_file_chksum(self, doc, chk_sum):
        """Sets the file check sum, if not already set.
        chk_sum - A string
        Raises CardinalityError if already defined.
        Raises OrderError if no package previously defined.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_chksum_set:
                self.file_chksum_set = True
                # check sums are always recorded with the SHA1 algorithm
                self.file(doc).chk_sum = checksum.Algorithm('SHA1', chk_sum)
                return True
            else:
                raise CardinalityError('File::CheckSum')
        else:
            raise OrderError('File::CheckSum')

    def set_file_license_comment(self, doc, text):
        """Sets the current file's license comment.
        Raises OrderError if no package or file defined.
        Raises CardinalityError if more than one per file.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_license_comment_set:
                self.file_license_comment_set = True
                self.file(doc).license_comment = text
                return True
            else:
                raise CardinalityError('File::LicenseComment')
        else:
            raise OrderError('File::LicenseComment')

    def set_file_copyright(self, doc, text):
        """Sets the current file's copyright text.
        Raises OrderError if no package or file defined.
        Raises CardinalityError if more than one.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_copytext_set:
                self.file_copytext_set = True
                self.file(doc).copyright = text
                return True
            else:
                raise CardinalityError('File::CopyRight')
        else:
            raise OrderError('File::CopyRight')

    def set_file_comment(self, doc, text):
        """Sets the current file's comment.
        Raises OrderError if no package or no file defined.
        Raises CardinalityError if more than one comment set.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_comment_set:
                self.file_comment_set = True
                self.file(doc).comment = text
                return True
            else:
                raise CardinalityError('File::Comment')
        else:
            raise OrderError('File::Comment')

    def set_file_notice(self, doc, text):
        """Sets the current file's notice text.
        Raises OrderError if no package or file defined.
        Raises CardinalityError if more than one.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_notice_set:
                self.file_notice_set = True
                # NOTE(review): unlike the other file setters, the notice
                # is run through str_from_text before storing -- confirm
                # this asymmetry is intentional.
                self.file(doc).notice = tagvaluebuilders.str_from_text(text)
                return True
            else:
                raise CardinalityError('File::Notice')
        else:
            raise OrderError('File::Notice')
class ReviewBuilder(tagvaluebuilders.ReviewBuilder):
    """Review builder with a set-once comment on the most recent review."""

    def add_review_comment(self, doc, comment):
        """Attach a comment to the most recently added review; returns
        True on success.

        Raises OrderError if no reviewer was defined before, and
        CardinalityError if the comment was already set.
        """
        if not doc.reviews:
            raise OrderError('ReviewComment')
        if self.review_comment_set:
            raise CardinalityError('ReviewComment')
        self.review_comment_set = True
        doc.reviews[-1].comment = comment
        return True
class Builder(DocBuilder, EntityBuilder, CreationInfoBuilder, PackageBuilder, FileBuilder, ReviewBuilder):
    """Aggregate RDF builder combining all the sub-builders above via
    multiple inheritance."""

    def reset(self):
        """Resets builder's state for building new documents.
        Must be called between usage with different documents.
        """
        self.reset_document()
        self.reset_package()
        self.reset_file_stat()
        self.reset_reviews()
| |
import typing as t
from gssapi._utils import import_gssapi_extension
from gssapi.raw import oids as roids
from gssapi.raw import misc as rmisc
from gssapi.raw import named_tuples as tuples
from gssapi.raw import names as rnames
from gssapi import _utils
rfc5587 = import_gssapi_extension('rfc5587')
rfc5801 = import_gssapi_extension('rfc5801')
class Mechanism(roids.OID):
"""
A GSSAPI Mechanism
This class represents a mechanism and centralizes functions dealing with
mechanisms and can be used with any calls.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.oids.OID` class,
and thus can be used with both low-level and high-level API calls.
"""
def __new__(
    cls,
    cpy: t.Optional[roids.OID] = None,
    elements: t.Optional[bytes] = None,
) -> "Mechanism":
    # Delegate construction to the low-level OID type; the cast only
    # narrows the static type to Mechanism for type checkers.
    return t.cast("Mechanism",
                  super(Mechanism, cls).__new__(cls, cpy, elements))
@property
def name_types(self) -> t.Set[roids.OID]:
    """
    Get the set of name types supported by this mechanism.
    """
    # Thin wrapper over the raw inquire_names_for_mech call.
    return rmisc.inquire_names_for_mech(self)
@property
def _saslname(self) -> tuples.InquireSASLNameResult:
    # SASL metadata (mech name, sasl name, description) for this
    # mechanism; requires the rfc5801 extension to be available.
    if rfc5801 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5801")
    return rfc5801.inquire_saslname_for_mech(self)
@property
def _attrs(self) -> tuples.InquireAttrsResult:
    # Mechanism attribute sets; requires the rfc5587 extension.
    if rfc5587 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5587")
    return rfc5587.inquire_attrs_for_mech(self)
def __str__(self) -> str:
return self._bytes_desc().decode(_utils._get_encoding())
def __unicode__(self) -> str:
return self._bytes_desc().decode(_utils._get_encoding())
def _bytes_desc(self) -> bytes:
base: t.Union[bytes, str] = self.dotted_form
if rfc5801 is not None and self._saslname and self._saslname.mech_name:
base = self._saslname.mech_name
if isinstance(base, str):
base = base.encode(_utils._get_encoding())
return base
def __repr__(self) -> str:
"""
Get a name representing the mechanism; always safe to call
"""
base = "<Mechanism (%s)>" % self.dotted_form
if rfc5801 is not None:
base = "<Mechanism %s (%s)>" % (
self._saslname.mech_name.decode('UTF-8'),
self.dotted_form
)
return base
@property
def sasl_name(self) -> str:
"""
Get the SASL name for the mechanism
:requires-ext:`rfc5801`
"""
return self._saslname.sasl_mech_name.decode('UTF-8')
@property
def description(self) -> str:
"""
Get the description of the mechanism
:requires-ext:`rfc5801`
"""
return self._saslname.mech_description.decode('UTF-8')
@property
def known_attrs(self) -> t.Set[roids.OID]:
"""
Get the known attributes of the mechanism; returns a set of OIDs
([OID])
:requires-ext:`rfc5587`
"""
return self._attrs.known_mech_attrs
@property
def attrs(self) -> t.Set[roids.OID]:
"""
Get the attributes of the mechanism; returns a set of OIDs ([OID])
:requires-ext:`rfc5587`
"""
return self._attrs.mech_attrs
@classmethod
def all_mechs(cls) -> t.Iterator["Mechanism"]:
"""
Get a generator of all mechanisms supported by GSSAPI
"""
return (cls(mech) for mech in rmisc.indicate_mechs())
@classmethod
def from_name(
cls,
name: rnames.Name,
) -> t.Iterator["Mechanism"]:
"""
Get a generator of mechanisms that may be able to process the name
Args:
name (~gssapi.names.Name): a name to inquire about
Returns:
[Mechanism]: a set of mechanisms which support this name
Raises:
~gssapi.exceptions.GSSError
"""
return (cls(mech) for mech in rmisc.inquire_mechs_for_name(name))
@classmethod
def from_sasl_name(
cls,
name: t.Optional[t.Union[bytes, str]] = None,
) -> "Mechanism":
"""
Create a Mechanism from its SASL name
Args:
name (str): SASL name of the desired mechanism
Returns:
Mechanism: the desired mechanism
Raises:
~gssapi.exceptions.GSSError
:requires-ext:`rfc5801`
"""
if rfc5801 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5801")
if isinstance(name, str):
name = name.encode(_utils._get_encoding())
m = rfc5801.inquire_mech_for_saslname(name)
return cls(m)
@classmethod
def from_attrs(
cls,
desired_attrs: t.Optional[
t.Union[roids.OID, t.Iterable[roids.OID]]
] = None,
except_attrs: t.Optional[
t.Union[roids.OID, t.Iterable[roids.OID]]
] = None,
critical_attrs: t.Optional[
t.Union[roids.OID, t.Iterable[roids.OID]]
] = None,
) -> t.Iterator["Mechanism"]:
"""
Get a generator of mechanisms supporting the specified attributes. See
RFC 5587's :func:`indicate_mechs_by_attrs` for more information.
Args:
desired_attrs ([OID]): Desired attributes
except_attrs ([OID]): Except attributes
critical_attrs ([OID]): Critical attributes
Returns:
[Mechanism]: A set of mechanisms having the desired features.
Raises:
~gssapi.exceptions.GSSError
:requires-ext:`rfc5587`
"""
if isinstance(desired_attrs, roids.OID):
desired_attrs = set([desired_attrs])
if isinstance(except_attrs, roids.OID):
except_attrs = set([except_attrs])
if isinstance(critical_attrs, roids.OID):
critical_attrs = set([critical_attrs])
if rfc5587 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5587")
mechs = rfc5587.indicate_mechs_by_attrs(desired_attrs,
except_attrs,
critical_attrs)
return (cls(mech) for mech in mechs)
| |
#! /usr/bin/env python
import argparse
import copy
import json
import operator
import re
import time
import traceback
# support Python 2 and 3's versions of this module
try:
import html
HTML_UNESCAPER = html
except ImportError:
import HTMLParser
HTML_UNESCAPER = HTMLParser.HTMLParser()
import executor
class Flagger(executor.Executor):
    """Scans recent Slack messages and re-announces those whose emoji
    reactions satisfy threshold rules posted in a control channel
    (messages of the form ``flag content rule <uuid> <threshold> :emoji: #chan``).
    """

    # Comparators accepted in threshold tokens, mapped to their operators.
    operators = {'>': operator.gt, '<': operator.lt, '==': operator.eq,
                 '>=': operator.ge, '<=': operator.le}

    def __init__(self, *args, **kwargs):
        # 'debug' is a dry-run flag: log what would be announced, say nothing.
        self.debug = kwargs.pop('debug', False)
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(Flagger, self).__init__(*args, **kwargs)
        self.now = int(time.time())

    def extract_threshold(self, token):
        """
        accept tokens of the format:
        int
        >=int
        <=int
        ==int
        >int
        <int
        returns (comparator, int) or throws error if invalid
        """
        # Raw strings: "\d"/"\D" in plain strings are invalid escape
        # sequences on modern Pythons.
        comparator = re.sub(r"\d+$", "", token)
        value = int(re.sub(r"\D*", "", token))
        if comparator == '':  # no comparator specified
            comparator = '>='
        # Slack HTML-escapes < and > in message text; undo that first.
        comparator = HTML_UNESCAPER.unescape(comparator)
        self.logger.debug("token: %s comparator: %s value: %s", token, comparator, value)
        assert comparator in self.operators
        return (comparator, value)

    def initialize_control(self):
        """
        sets up known control configuration based on control channel messages

        Returns True on success, False if the control channel is missing.
        """
        channel = self.config.control_channel
        if not self.slacker.channel_exists(channel):
            self.logger.warning("Flagger control channel does not exist, cannot run. Please create #%s.", channel)
            return False
        cid = self.slacker.get_channelid(channel)
        messages = self.slacker.get_messages_in_time_range(0, cid, self.now)
        control = {}
        for message in messages:
            text = message['text']
            tokens = text.split()
            # Only messages of the form "flag content rule ..." are rules.
            if tokens[0:3] != ['flag', 'content', 'rule']:
                continue
            if len(tokens) < 5:
                self.logger.warning("Control message %s has too few tokens", text)
                continue
            # "flag content rule <uuid> delete" retracts an earlier rule.
            if len(tokens) == 5 and tokens[4] == 'delete':
                uuid = tokens[3]
                if uuid in control:
                    del control[uuid]
                    self.logger.debug("Message %s deletes UUID %s", text, uuid)
                continue
            try:
                tokens = text.split()
                uuid = tokens[3]
                comparator, threshold = self.extract_threshold(tokens[4])
                emoji = tokens[5].replace(":", "")
                output_channel_id = re.sub("[<>]", "", tokens[6])
                # Channel mentions may arrive as <#C123|name>; keep the id.
                if output_channel_id.find("|") != -1:
                    cid, cname = output_channel_id.split("|")
                    output_channel_id = cid
                output_channel_name = self.slacker.replace_id(output_channel_id)
                control[uuid] = {'threshold': threshold, "comparator": comparator,
                                 'emoji': emoji, 'output': output_channel_name}
            except Exception as e:
                tb = traceback.format_exc()
                # Log the concrete exception type; the original interpolated
                # the Exception base class object, which is always the same.
                m = "Couldn't create flagger rule with text {}: {} {}".format(text, type(e), e)
                self.logger.warning(m)
                self.logger.debug(tb)
        self.control = control
        self.logger.debug("control: %s", json.dumps(self.control, indent=4))
        self.emoji = [x['emoji'] for x in self.control.values()]
        self.initialize_emoji_aliases()
        return True

    def initialize_emoji_aliases(self):
        """
        In some cases, emojiA might be an alias of emojiB
        The problem is that if we say that 2xemojiB should be
        enough to flag something, then we should accept
        2 x emojiB
        1 x emojiA, 1 x emojiB
        2 x emojiA
        This method grabs the emoji list from the Slack and creates the equivalence
        structure
        """
        self.logger.debug("Starting emoji alias list")
        emojis_response = self.slacker.get_emojis()
        self.logger.debug("emojis_response keys are %s", emojis_response.keys())
        emojis = emojis_response['emoji']
        equivalents = {}
        for emoji in emojis:
            target = emojis[emoji]
            # Alias entries look like "alias:<canonical-name>".
            target_type, target_value = target.split(":", 1)
            if target_type != "alias":
                continue
            self.logger.debug("Found emoji alias: %s <-> %s", emoji, target_value)
            if emoji not in equivalents:
                equivalents[emoji] = []
            if target_value not in equivalents:
                equivalents[target_value] = []
            # Record the equivalence in both directions.
            equivalents[emoji].append(target_value)
            equivalents[target_value].append(emoji)
        self.emoji_equivalents = equivalents
        self.logger.debug("equivalents: %s", json.dumps(self.emoji_equivalents, indent=4))
        if "floppy_disk" in self.emoji_equivalents.keys():
            self.logger.debug("floppy_disk: %s", self.emoji_equivalents['floppy_disk'])

    def message_destination(self, message):
        """
        if interesting, returns the control rule[s] under which to announce
        otherwise, returns []
        """
        channels = []
        # Return an empty list (not False, as the original did) so the
        # docstring's contract holds; callers only test truthiness.
        if message.get("reactions") is None:
            return []
        reactions = message.get("reactions")
        emoji_set = set(self.emoji)
        current_reactions = {}
        self.logger.debug("reactions: %s", reactions)
        self.logger.debug("emoji_equivalents:\n%s", json.dumps(self.emoji_equivalents, indent=4))
        if "floppy_disk" in self.emoji_equivalents.keys():
            self.logger.debug("floppy_disk: %s", self.emoji_equivalents['floppy_disk'])
        for reaction in reactions:
            count = reaction['count']
            current_emoji = reaction['name']
            self.logger.debug("current_emoji: %s", current_emoji)
            # Copy: appending the emoji itself below must not mutate the
            # shared equivalence table.
            equivalents = copy.copy(self.emoji_equivalents.get(current_emoji, []))
            self.logger.debug("equivalents: %s", equivalents)
            equivalents.append(current_emoji)
            self.logger.debug("equivalents: %s", equivalents)
            current_set = set(equivalents)
            i = current_set.intersection(emoji_set)
            if not i:
                continue
            # Credit the reaction count to the emoji and all of its aliases.
            for ce in equivalents:
                current_reactions[ce] = current_reactions.get(ce, 0) + count
        # if we're here, at least one emoji matches (but count may still not be right)
        self.logger.debug("Current reactions: {}".format(current_reactions))
        for uuid in self.control:
            rule = self.control[uuid]
            for ce in current_reactions:
                if ce == rule['emoji']:
                    count = current_reactions[ce]
                    threshold = rule['threshold']
                    comparator = rule['comparator']
                    op = self.operators[comparator]
                    if op(count, threshold):
                        channels.append(rule)
        return channels

    def get_interesting_messages(self):
        """
        returns [[message, [listofchannelstoannounce]]
        """
        # Scan the last 24 hours of every known channel.
        dayago = self.now - 86400
        messages = []
        for channel in self.slacker.channels_by_name:
            cid = self.slacker.get_channelid(channel)
            cur_messages = self.slacker.get_messages_in_time_range(dayago, cid, self.now)
            for message in cur_messages:
                announce = self.message_destination(message)
                if announce:
                    messages.append([message, announce])
        return messages

    def announce_interesting_messages(self):
        """Post a summary of each flagged message into its rule's channel."""
        messages = self.get_interesting_messages()
        for message, channels in messages:
            ts = message["ts"].replace(".", "")
            channel = message["channel"]
            author = message["user"]
            author_name = self.slacker.users_by_id[author]
            text = self.slacker.asciify(message["text"])
            text = self.slacker.detokenize(text)
            url = "http://{}.slack.com/archives/{}/p{}".format(self.config.slack_name, channel, ts)
            m = "*@{}* said in *#{}* _'{}'_ ({})".format(author_name, channel, text, url)
            for output_channel in channels:
                if self.slacker.channel_exists(output_channel["output"]):
                    md = "Saying {} to {}".format(m, output_channel["output"])
                    self.logger.debug(md)
                    if not self.debug and self.config.activated:  # TODO: rename debug to dry run?
                        self.slackbot.say(output_channel["output"], m)
                else:
                    # Bug fix: the original called .format() on a %s-style
                    # template, so nothing was ever interpolated.  Use the
                    # logger's lazy %-args instead.
                    self.logger.warning(
                        "Attempted to announce in %s because of rule :%s:%s%s, but channel does not exist.",
                        output_channel["output"],
                        output_channel["emoji"],
                        output_channel["comparator"],
                        output_channel["threshold"])

    def flag(self):
        """Entry point: load control rules, then announce matching messages."""
        if self.config.flagger_disabled:
            self.logger.info("Not Flagging... Flagger disabled")
            return
        self.logger.info("Flagging")
        if self.initialize_control():
            self.announce_interesting_messages()
if __name__ == "__main__":
    # CLI entry point: --debug makes this a dry run (log, don't post).
    arg_parser = argparse.ArgumentParser(description='Flag interesting Slack messages.')
    arg_parser.add_argument("--debug", action="store_true", default=False)
    cli_args = arg_parser.parse_args()
    Flagger(debug=cli_args.debug).flag()
| |
import os
import json
from django.utils.safestring import mark_safe
from django.http import HttpResponse, HttpResponseRedirect,\
HttpResponseNotAllowed, HttpResponseNotFound
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import permission_required, login_required
from django.utils.decorators import method_decorator
from django.conf import settings
from survey.models import Survey, Location, LocationType,\
QuestionSet, ListingTemplate, Batch,\
Question, QuestionTemplate, QuestionOption, QuestionFlow, Answer
from survey.utils.query_helper import get_filterset
from survey.models import LocationType
from survey.models import Interview
from survey.forms.enumeration_area import LocationsFilterForm
from survey.forms.question_set import get_question_set_form
from survey.forms.question import get_question_form
from survey.forms.filters import QuestionSetResultsFilterForm
from survey.forms.filters import SurveyResultsFilterForm
from survey.odk.utils.odk_helper import get_zipped_dir
from survey.services.results_download_service import ResultsDownloadService
from django.db.models import ProtectedError
# Default model handled by this module; views may substitute a subclass.
model = QuestionSet
# ModelForm class used for creating/editing individual questions.
QuestionsForm = get_question_form(Question)
class QuestionSetView(object):
    """Reusable CRUD helper for QuestionSet subclasses (batches, listings).

    Users of this class' methods need to set their own bread crumbs.
    """
    model = QuestionSet
    questionSetForm = get_question_set_form(QuestionSet)

    def __init__(self, model_class=model, *args, **kwargs):
        """Bind the view to a concrete QuestionSet subclass.

        Raises:
            TypeError: if model_class is not a QuestionSet subclass.  (The
                original raised an HttpResponseNotAllowed instance, which is
                not an exception and itself produced a TypeError at runtime.)
        """
        if issubclass(model_class, QuestionSet):
            self.model = model_class
            self.questionSetForm = get_question_set_form(
                model_class)  # create appropriate qset form
        else:
            raise TypeError('model_class must be a QuestionSet subclass')

    @method_decorator(permission_required('auth.can_view_batches'))
    def index(
            self,
            request,
            qsets,
            extra_context=None,
            template_name='question_set/index.html',
            **form_extra):
        """List question sets, filtered by the 'q' search parameter if given."""
        # Avoid the mutable-default-argument pitfall.
        extra_context = extra_context or {}
        search_fields = ['name', 'description']
        if 'q' in request.GET:
            qsets = get_filterset(qsets, request.GET['q'], search_fields)
        context = {
            'question_sets': qsets.order_by('-created'),
            'request': request,
            'model': self.model,
            'placeholder': 'name, description',
            'model_name': self.model.__name__,
            'question_set_form': self.questionSetForm(
                **form_extra)}
        context.update(extra_context)
        return render(request, template_name,
                      context)

    @method_decorator(permission_required('auth.can_view_batches'))
    def new(
            self,
            request,
            extra_context=None,
            template_name='question_set/new.html',
            **form_extra):
        """Create a question set from POSTed data, or render the blank form."""
        extra_context = extra_context or {}
        response = None
        if request.method == 'POST':
            qset_form = self.questionSetForm(request.POST, **form_extra)
            if qset_form.is_valid():
                qset_form = self._save_form(request, qset_form)
                messages.success(
                    request, '%s successfully added.' %
                    self.model.verbose_name())
                response = HttpResponseRedirect(
                    reverse(
                        '%s_home' %
                        self.model.resolve_tag()))
        else:
            qset_form = self.questionSetForm(**form_extra)
        cancel_url = reverse('%s_home' % self.model.resolve_tag())
        # Creating a batch from a survey page should cancel back to it.
        if "initial" in form_extra:
            if "survey" in form_extra['initial']:
                cancel_url = reverse('batch_index_page', args=[form_extra['initial']['survey'], ])
        context = {'question_set_form': qset_form,
                   'title': "New %s" % self.model.verbose_name(),
                   'button_label': 'Create',
                   'id': 'add-question_set-form',
                   'model': self.model,
                   'cancel_url': request.META.get('HTTP_REFERER') or cancel_url
                   }
        context.update(extra_context)
        return response or render(request, template_name, context)

    def _save_form(self, request, qset_form, **kwargs):
        # Hook point: subclasses may override to customise persistence.
        return qset_form.save(**request.POST)

    @method_decorator(permission_required('auth.can_view_batches'))
    def edit(
            self,
            request,
            qset,
            extra_context=None,
            template_name='question_set/new.html',
            **form_extra):
        """Edit an existing question set, or render the bound form."""
        extra_context = extra_context or {}
        if request.method == 'POST':
            qset_form = self.questionSetForm(instance=qset, data=request.POST)
            if qset_form.is_valid():
                qset_form = self._save_form(request, qset_form)
                messages.success(
                    request, '%s successfully edited.' %
                    self.model.verbose_name())
                return HttpResponseRedirect(
                    reverse(
                        '%s_home' %
                        self.model.resolve_tag()))
        else:
            qset_form = self.questionSetForm(instance=qset, **form_extra)
        context = {
            'request': request,
            'model': self.model,
            'listing_model': ListingTemplate,
            'id': 'edit-question-set-form',
            'placeholder': 'name, description',
            'question_set_form': qset_form,
            'action': '',
            'cancel_url': request.META.get('HTTP_REFERER') or reverse('%s_home' % self.model.resolve_tag())}
        context.update(extra_context)
        return render(request, template_name, context)

    @method_decorator(permission_required('auth.can_view_batches'))
    def delete(self, request, qset):
        """Delete qset unless it has interviews; redirect to the home page."""
        if qset.interviews.exists():
            messages.error(
                request,
                "%s cannot be deleted because it already has interviews." %
                self.model.verbose_name())
        else:
            qset.delete()
            messages.info(request, "%s Successfully deleted!." % self.model.verbose_name())
        # Bug fix: the raw string '%s_home' is a URL *name*, not a URL;
        # resolve it with reverse() like every other redirect in this module.
        return HttpResponseRedirect(reverse('%s_home' % self.model.resolve_tag()))
@permission_required('auth.can_view_batches')
def delete(request, question_id, batch_id):
    """Delete a question set; batches redirect back to their survey page."""
    # todo: Should remove question_id from this parameters :(
    qset = QuestionSet.get(pk=batch_id)
    view = QuestionSetView(qset.__class__)
    if qset.__class__ != Batch:
        return view.delete(request, qset)
    # For batches, remember the owning survey before deletion so we can
    # return to its batch listing afterwards.
    survey = qset.survey
    view.delete(request, qset)
    return HttpResponseRedirect(reverse('batch_index_page', args=(survey.id, )))
def delete_qset_listingform(request, question_id):
    """Delete the question set (listing form) that owns the given question.

    Refuses when the set already has interviews; reports protected-FK
    failures as errors instead of successes.
    """
    question = get_object_or_404(Question, pk=question_id)
    qset = question.qset
    if qset.interviews.exists():
        msg = "%s cannot be deleted because it already has interviews." % qset.verbose_name()
        messages.error(
            request, msg)
    else:
        try:
            qset.delete()
            messages.success(request, "Listing form successfully deleted.")
        except ProtectedError:
            # Deletion blocked by protected FK references.  This is a
            # failure, so report it with messages.error (the original used
            # messages.success and carried dead 'pass' statements).
            messages.error(request, "You can't delete this because it's being used by another")
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
@permission_required('auth.can_view_aggregates')
def view_data(request, qset_id):
    """Render collected interview data for a single question set."""
    qset = QuestionSet.get(pk=qset_id)
    request.breadcrumbs(qset.edit_breadcrumbs(qset=qset))
    # Pre-select (and lock) the filter fields implied by the URL.
    request.GET = request.GET.copy()
    request.GET['question_set'] = qset_id
    disabled_fields = ['question_set']
    if hasattr(qset, 'survey'):
        request.GET['survey'] = qset.survey.id
        disabled_fields.append('survey')
    qset_interviews = Interview.objects.filter(question_set__id=qset_id)
    return _view_qset_data(
        request,
        qset.__class__,
        qset_interviews,
        'View Data',
        disabled_fields=disabled_fields)
@login_required
@permission_required('auth.can_view_aggregates')
def view_listing_data(request):
    """Render interview data across every listing template."""
    listing_ids = ListingTemplate.objects.values_list('id', flat=True)
    interviews = Interview.objects.filter(question_set__id__in=listing_ids)
    return _view_qset_data(request, ListingTemplate, interviews,
                           'View Listing Data')
@login_required
@permission_required('auth.can_view_aggregates')
def view_survey_data(request):
    """Render interview data across every survey batch."""
    batch_ids = Batch.objects.values_list('id', flat=True)
    interviews = Interview.objects.filter(question_set__id__in=batch_ids)
    return _view_qset_data(request, Batch, interviews, 'View Survey Data')
def _view_qset_data(request, model_class, interviews, title, disabled_fields=None):
    """Shared implementation behind the data-view pages.

    Filters the given interviews queryset by the survey/location filter
    forms and the 'q' search parameter, then renders the data table (a
    paginated HTML report when both a question set and a survey are chosen).
    """
    # Avoid the mutable-default-argument pitfall.
    if disabled_fields is None:
        disabled_fields = []
    params = request.GET if request.method == 'GET' else request.POST
    survey_filter = SurveyResultsFilterForm(
        model_class, disabled_fields=disabled_fields, data=params)
    locations_filter = LocationsFilterForm(data=request.GET, include_ea=True)
    selected_qset = None
    survey = None
    items_per_page = int(params.get('max_display_per_page', 50))
    try:
        page_index = int(params.get('page', 1)) - 1
    except (TypeError, ValueError):
        # Narrowed from BaseException: only a malformed 'page' value should
        # fall back to the first page.
        page_index = 0
    if survey_filter.is_valid():
        interviews = survey_filter.get_interviews(interviews=interviews)
        selected_qset = survey_filter.cleaned_data['question_set']
        survey = survey_filter.cleaned_data['survey']
    if locations_filter.is_valid():
        interviews = interviews.filter(ea__in=locations_filter.get_enumerations())
    search_fields = [
        'ea__name',
        'survey__name',
        'question_set__name',
        'answer__as_text',
    ]
    if 'q' in request.GET:
        interviews = get_filterset(interviews, request.GET['q'], search_fields)
    context = {
        'survey_filter': survey_filter,
        'interviews': interviews,
        'locations_filter': locations_filter,
        'location_filter_types': LocationType.in_between(),
        'placeholder': 'Response, EA, Survey, %s' % model_class.verbose_name(),
        'selected_qset': selected_qset,
        'model_class': model_class,
        'items_per_page': items_per_page,
        'max_display_per_page': items_per_page,
        'title': title}
    if selected_qset and survey:
        # Build the paginated answers report only when both filters resolve.
        download_service = ResultsDownloadService(
            selected_qset,
            survey=survey,
            interviews=interviews,
            page_index=page_index,
            items_per_page=items_per_page)
        df = download_service.get_interview_answers()
        context['report'] = mark_safe(
            df.to_html(
                classes='table table-striped\
                dataTable table-bordered table-hover table-sort',
                max_rows=items_per_page))
    return render(request, 'question_set/view_all_data.html', context)
@login_required
@permission_required('auth.can_view_aggregates')
def listing_entries(request, qset_id):
    """List the surveys using a listing template, with optional name search."""
    listing_qset = get_object_or_404(ListingTemplate, pk=qset_id)
    surveys = listing_qset.survey_settings.all()
    request.breadcrumbs(listing_qset.edit_breadcrumbs(qset=listing_qset))
    if 'q' in request.GET:
        surveys = get_filterset(surveys, request.GET['q'], ['name', ])
    return render(request, 'question_set/listing_entries.html', {
        'question_set': listing_qset,
        'surveys': surveys,
        'placeholder': 'name,',
    })
@login_required
def identifiers(request):
    """Return (as JSON) question identifiers for a question set.

    With 'q_id' supplied, only identifiers of questions appearing before
    that question in the set's flow are returned, de-duplicated but in flow
    order (the original collected them into a set, scrambling the order).
    """
    # Local renamed from 'id' to avoid shadowing the builtin.
    qset_id = request.GET.get('id', None)
    last_question_id = request.GET.get('q_id', None)
    if last_question_id is None:
        json_dump = json.dumps(
            list(
                Question.objects.filter(
                    qset__id=qset_id).values_list(
                    'identifier',
                    flat=True)))
    else:
        # return questions before last question
        qset = QuestionSet.get(pk=qset_id)
        ordered_identifiers = []
        for question in qset.flow_questions:
            if int(question.id) == int(last_question_id):
                break
            if question.identifier not in ordered_identifiers:
                ordered_identifiers.append(question.identifier)
        json_dump = json.dumps(ordered_identifiers)
    return HttpResponse(json_dump, content_type='application/json')
def clone_qset(request, qset_id):
    """Deep-clone a question set and return to its home page."""
    qset = QuestionSet.get(pk=qset_id)
    qset.deep_clone()
    messages.info(request, 'Successfully cloned %s' % qset.name)
    home_url = reverse('%s_home' % qset.resolve_tag())
    return HttpResponseRedirect(home_url)
@permission_required('auth.can_view_aggregates')
def download_attachment(request, question_id, interview_id):
    """Serve the zipped attachment directory of one answer as a download."""
    question = get_object_or_404(Question, pk=question_id)
    interview = get_object_or_404(Interview, pk=interview_id)
    answer_class = Answer.get_class(question.answer_type)
    filename = '%s-%s.zip' % (question.identifier, question_id)
    try:
        answer_value = str(answer_class.get(interview=interview,
                                            question=question).value)
        attachment_dir = os.path.join(settings.SUBMISSION_UPLOAD_BASE,
                                      answer_value, 'attachments')
        response = HttpResponse(content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename="%s"' % filename
        response.write(get_zipped_dir(attachment_dir))
        return response
    except Exception as e:
        # Best effort: surface the problem to the caller as plain text.
        return HttpResponse(str(e))
def download_data(request, qset_id):
    """Stream a CSV of interview results for a question set, optionally
    restricted (and name-prefixed) by the selected location filter."""
    qset = QuestionSet.get(pk=qset_id)
    params = request.GET if request.method == 'GET' else request.POST
    survey_filter = QuestionSetResultsFilterForm(qset, data=params)
    locations_filter = LocationsFilterForm(data=request.GET, include_ea=True)
    interviews = survey_filter.get_interviews()
    # Bug fix: without this default the name below was unbound (NameError)
    # whenever the locations filter failed validation.
    last_selected_loc = None
    if locations_filter.is_valid():
        interviews = interviews.filter(
            ea__in=locations_filter.get_enumerations()).order_by('created')
        last_selected_loc = locations_filter.last_location_selected
    download_service = ResultsDownloadService(qset, interviews=interviews)
    file_name = '%s%s' % ('%s-%s-' % (
        last_selected_loc.type.name,
        last_selected_loc.name) if last_selected_loc else '',
        qset.name)
    reports_df = download_service.generate_interview_reports()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment;\
            filename="%s.csv"' % file_name
    reports_df.to_csv(
        response,
        date_format='%Y-%m-%d %H:%M:%S',
        encoding='utf-8')  # exclude interview id
    return response
@login_required
def list_qsets(request):
    """JSON list of question sets (id/name), optionally scoped to a survey."""
    survey_id = request.GET.get('survey_id')
    if survey_id:
        values = Survey.get(id=survey_id).qsets.values('id', 'name')
    else:
        values = QuestionSet.objects.values('id', 'name')
    return HttpResponse(json.dumps(list(values)),
                        content_type='application/json')
@login_required
def list_questions(request):
    """JSON list of questions (id/identifier/text), optionally per set."""
    qset_id = request.GET.get('id')
    if qset_id:
        values = [{'id': q.id,
                   'identifier': q.identifier,
                   'text': q.text}
                  for q in QuestionSet.get(id=qset_id).all_questions]
    else:
        values = list(
            Question.objects.all().values('id', 'identifier', 'text'))
    return HttpResponse(json.dumps(list(values)),
                        content_type='application/json')
@login_required
def question_validators(request):
    """JSON map of question id -> validator names, or one question's list."""
    values = {}
    if request.GET.get('id'):
        qset = QuestionSet.get(id=request.GET.get('id'))
        for question in qset.all_questions:
            values['%s' % question.id] = list(question.validator_names())
    elif request.GET.get('ques_id'):
        values = Question.get(id=request.GET.get('ques_id')).validator_names()
    return HttpResponse(json.dumps(values), content_type='application/json')
@login_required
def question_options(request):
    """JSON map of option order -> text, per question or for one question."""
    values = {}
    if request.GET.get('id'):
        qset = QuestionSet.get(id=request.GET.get('id'))
        for question in qset.all_questions:
            values['%s' % question.id] = {opt.order: opt.text
                                          for opt in question.options.all()}
    elif request.GET.get('ques_id'):
        question = Question.get(id=request.GET.get('ques_id'))
        values = dict(question.options.values_list('order', 'text'))
    return HttpResponse(json.dumps(values), content_type='application/json')
| |
# -*- coding: utf-8 -*-
# 1st-run initialisation
# Set settings.base.prepopulate to 0 in Production
# (to save 1x DAL hit every page).
# NOTE(review): this is a web2py model script — names such as settings, db,
# auth, s3db, s3task, gis, request and response are injected by the
# framework's execution environment, not imported here.
pop_list = settings.get_base_prepopulate()
if pop_list == 0:
    pop_list = []
else:
    # Only prepopulate an empty database: if any auth-group row already
    # exists, the DB has been initialised before, so skip.
    table = db[auth.settings.table_group_name]
    # The query used here takes 2/3 the time of .count().
    if db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        pop_list = []
# Normalise to a list so a single prepopulate setting also iterates below.
if not isinstance(pop_list, (list, tuple)):
    pop_list = [pop_list]
if len(pop_list) > 0:
    # =========================================================================
    # Populate default roles and permissions
    #
    # Allow debug
    # NOTE(review): Python 2 "print >>" syntax — this file targets Python 2.
    import sys
    print >> sys.stdout, "Please be patient whilst the database is populated"
    # Shortcuts
    acl = auth.permission
    sysroles = auth.S3_SYSTEM_ROLES
    create_role = auth.s3_create_role
    #update_acls = auth.s3_update_acls
    # Do not remove or change order of these role definitions (System Roles):
    create_role("Administrator",
                "System Administrator - can access & make changes to any data",
                uid=sysroles.ADMIN,
                system=True,
                protected=True,
                )
    create_role("Authenticated",
                "Authenticated - all logged-in users",
                uid=sysroles.AUTHENTICATED,
                system=True,
                protected=True,
                )
    create_role("Anonymous",
                "Unauthenticated users",
                # Allow unauthenticated users to view the list of organisations
                # so they can select an organisation when registering
                dict(t="org_organisation", uacl=acl.READ),
                # Allow unauthenticated users to see the list of sites for an
                # org when registering
                dict(c="org", f="sites_for_org", uacl=acl.READ),
                uid=sysroles.ANONYMOUS,
                system=True,
                protected=True,
                )
    # Primarily for Security Policy 2
    create_role("Editor",
                "Editor - can access & make changes to any unprotected data",
                uid=sysroles.EDITOR,
                system=True,
                protected=True,
                )
    # MapAdmin
    # Keep the returned id: used below to assign country ownership.
    map_admin = create_role("MapAdmin",
                            "MapAdmin - allowed access to edit the MapService Catalogue",
                            dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
                            dict(c="gis", f="location", uacl=acl.ALL, oacl=acl.ALL),
                            uid=sysroles.MAP_ADMIN,
                            system=True,
                            protected=True,
                            )
    # OrgAdmin (policies 6, 7 and 8)
    create_role("OrgAdmin",
                "OrgAdmin - allowed to manage user roles for organisation realms",
                uid=sysroles.ORG_ADMIN,
                system=True,
                protected=True,
                )
    # OrgGroupAdmin (policies 6, 7 and 8)
    create_role("OrgGroupAdmin",
                "OrgGroupAdmin - allowed to manage organisation group realms",
                uid=sysroles.ORG_GROUP_ADMIN,
                system=True,
                protected=True,
                )
    # Enable shortcuts (needed by default.py)
    system_roles = auth.get_system_roles()
    ADMIN = system_roles.ADMIN
    AUTHENTICATED = system_roles.AUTHENTICATED
    ANONYMOUS = system_roles.ANONYMOUS
    EDITOR = system_roles.EDITOR
    MAP_ADMIN = system_roles.MAP_ADMIN
    ORG_ADMIN = system_roles.ORG_ADMIN
    ORG_GROUP_ADMIN = system_roles.ORG_GROUP_ADMIN
    # =========================================================================
    # Configure Scheduled Tasks
    #
    has_module = settings.has_module
    if has_module("msg"):
        # Send Messages from Outbox
        # SMS every minute
        s3task.schedule_task("msg_process_outbox",
                             vars={"contact_method":"SMS"},
                             period=120,  # seconds
                             timeout=120, # seconds
                             repeats=0    # unlimited
                             )
        # Emails every 5 minutes
        s3task.schedule_task("msg_process_outbox",
                             vars={"contact_method":"EMAIL"},
                             period=300,  # seconds
                             timeout=300, # seconds
                             repeats=0    # unlimited
                             )
        # Tweets every minute
        #s3task.schedule_task("msg_process_outbox",
        #                     vars={"contact_method":"TWITTER"},
        #                     period=120,  # seconds
        #                     timeout=120, # seconds
        #                     repeats=0    # unlimited
        #                     )
        # Subscription notifications
        s3task.schedule_task("notify_check_subscriptions",
                             period=300,
                             timeout=300,
                             repeats=0)
    # Daily maintenance
    s3task.schedule_task("maintenance",
                         vars={"period":"daily"},
                         period=86400, # seconds, so 1/day
                         timeout=600,  # seconds
                         repeats=0     # unlimited
                         )
    # =========================================================================
    # Import PrePopulate data
    #
    # Override authorization
    auth.override = True
    # No location tree updates
    # (deferred until after all imports; rebuilt explicitly further below)
    gis.disable_update_location_tree = True
    # Load all Models to ensure all DB tables present
    s3db.load_all_models()
    # Shortcuts
    path_join = os.path.join
    request_folder = request.folder
    if settings.get_auth_opt_in_to_email():
        table = db.pr_group
        # group_type 5 = mailing-list style opt-in teams
        for team in settings.get_auth_opt_in_team_list():
            table.insert(name = team, group_type = 5)
    # Synchronisation
    db.sync_config.insert() # Defaults are fine
    # Messaging Module
    if has_module("msg"):
        update_super = s3db.update_super
        # To read inbound email, set username (email address), password, etc.
        # here. Insert multiple records for multiple email sources.
        table = db.msg_email_channel
        id = table.insert(server = "imap.gmail.com",
                          protocol = "imap",
                          use_ssl = True,
                          port = 993,
                          username = "example-username",
                          password = "password",
                          delete_from_server = False
                          )
        update_super(table, dict(id=id))
        # Need entries for the Settings/1/Update URLs to work
        table = db.msg_twitter_channel
        id = table.insert(enabled = False)
        update_super(table, dict(id=id))
    # Budget Module
    if has_module("budget"):
        db.budget_parameter.insert() # Defaults are fine
    # Climate Module
    if has_module("climate"):
        s3db.climate_first_run()
    # Incident Reporting System
    if has_module("irs"):
        # Categories visible to ends-users by default
        table = db.irs_icategory
        table.insert(code = "flood")
        table.insert(code = "geophysical.landslide")
        table.insert(code = "roadway.bridgeClosure")
        table.insert(code = "roadway.roadwayClosure")
        table.insert(code = "other.buildingCollapsed")
        table.insert(code = "other.peopleTrapped")
        table.insert(code = "other.powerFailure")
    # Supply Module
    if has_module("supply"):
        db.supply_catalog.insert(name = settings.get_supply_catalog_default())
    # Ensure DB population committed when running through shell
    db.commit()
    # =========================================================================
    # PrePopulate import (from CSV)
    #
    # Create the bulk Importer object
    bi = s3base.S3BulkImporter()
    # Register handlers
    s3.import_font = bi.import_font
    s3.import_image = bi.import_image
    s3.import_remote_csv = bi.import_remote_csv
    s3.import_role = bi.import_role
    s3.import_script = bi.import_script
    s3.import_user = bi.import_user
    # Relax strict email-matching rule for import updates of person records
    # (original value saved; restored after the import loop)
    email_required = settings.get_pr_import_update_requires_email()
    settings.pr.import_update_requires_email = False
    # Additional settings for user table imports:
    s3db.configure("auth_user",
                   onaccept = lambda form: auth.s3_approve_user(form.vars))
    s3db.add_components("auth_user", auth_membership="user_id")
    # Flag that Assets are being imported, not synced
    s3.asset_import = True
    # Allow population via shell scripts
    if not request.env.request_method:
        request.env.request_method = "GET"
grandTotalStart = datetime.datetime.now()
for pop_setting in pop_list:
start = datetime.datetime.now()
# Clear Tasklist
bi.tasks = []
# Import data specific to the prepopulate setting
if pop_setting == 1:
# Populate with the default data
path = path_join(request_folder,
"modules",
"templates",
"default")
bi.perform_tasks(path)
else:
path = path_join(request_folder,
"modules",
"templates",
pop_setting)
if not os.path.exists(path):
# Legacy template?
path = path_join(request_folder,
"private",
"templates",
pop_setting)
if not os.path.exists(path):
print >> sys.stderr, "Unable to install data %s no valid directory found" % pop_setting
continue
bi.perform_tasks(path)
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python 2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate task completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate task completed in %s" % duration
bi.resultList = []
for errorLine in bi.errorList:
try:
print >> sys.stderr, errorLine
except:
s3_unicode = s3base.s3_unicode
_errorLine = ""
for i in range(0, len(errorLine)):
try:
_errorLine += s3_unicode(errorline[i])
except:
pass
print >> sys.stderr, _errorLine
    # Restore setting for strict email-matching
    settings.pr.import_update_requires_email = email_required
    # Restore Auth
    auth.override = False
    # Enable location tree updates
    gis.disable_update_location_tree = False
    # Update Location Tree (disabled during prepop)
    start = datetime.datetime.now()
    gis.update_location_tree()
    end = datetime.datetime.now()
    print >> sys.stdout, "Location Tree update completed in %s" % (end - start)
    # Countries are only editable by MapAdmin
    # (map_admin is the role id returned by create_role above)
    db(db.gis_location.level == "L0").update(owned_by_group=map_admin)
    if has_module("disease"):
        # Populate disease_stats_aggregate (disabled during prepop)
        # - needs to be done after locations
        start = datetime.datetime.now()
        s3db.disease_stats_rebuild_all_aggregates()
        end = datetime.datetime.now()
        print >> sys.stdout, "Disease Statistics data aggregation completed in %s" % (end - start)
    if has_module("stats"):
        # Populate stats_demographic_aggregate (disabled during prepop)
        # - needs to be done after locations
        start = datetime.datetime.now()
        s3db.stats_demographic_rebuild_all_aggregates()
        end = datetime.datetime.now()
        print >> sys.stdout, "Demographic data aggregation completed in %s" % (end - start)
    if has_module("vulnerability"):
        # Populate vulnerability_aggregate (disabled during prepop)
        # - needs to be done after locations
        start = datetime.datetime.now()
        s3db.vulnerability_rebuild_all_aggregates()
        end = datetime.datetime.now()
        print >> sys.stdout, "Vulnerability data aggregation completed in %s" % (end - start)
    grandTotalEnd = datetime.datetime.now()
    duration = grandTotalEnd - grandTotalStart
    try:
        # Python 2.7
        duration = '{:.2f}'.format(duration.total_seconds()/60)
        print >> sys.stdout, "Pre-populate completed in %s mins" % duration
    except AttributeError:
        # older Python
        print >> sys.stdout, "Pre-populate completed in %s" % duration
    # =========================================================================
    # Indexes
    #
    # Person Registry
    tablename = "pr_person"
    # Add extra indexes on search fields
    # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
    field = "first_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    field = "middle_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    field = "last_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    # GIS
    # Add extra index on search field
    # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
    tablename = "gis_location"
    field = "name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    if settings.get_gis_spatialdb():
        # Add Spatial Index (PostgreSQL-only currently)
        db.executesql("CREATE INDEX gis_location_gist on %s USING GIST (the_geom);" % tablename)
    # Ensure the Planner takes this into consideration
    # Vacuum cannot run in a transaction block
    # autovacuum should be on anyway so will run ANALYZE after 50 rows inserted/updated/deleted
    #db.executesql("VACUUM ANALYZE;")
    # Restore view
    response.view = "default/index.html"
# END =========================================================================
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes to perform topological analyses of structures.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import math
import numpy as np
import itertools
import collections
from warnings import warn
from pyhull.voronoi import VoronoiTess
from pymatgen import PeriodicSite
from pymatgen import Element, Specie, Composition
from pymatgen.util.num_utils import abs_cap
class VoronoiCoordFinder(object):
    """
    Determines the coordination of each site in a structure from a Voronoi
    tessellation, weighting each neighbor by the solid angle of the shared
    Voronoi facet.

    Args:
        structure (Structure): Input structure
        target ([Element/Specie]): A list of target species to determine
            coordination for.
        cutoff (float): Radius in Angstrom cutoff to look for coordinating
            atoms. Defaults to 10.0.
    """
    def __init__(self, structure, target=None, cutoff=10.0):
        self._structure = structure
        self.cutoff = cutoff
        # Default to every element present in the structure.
        self._target = structure.composition.elements if target is None else target

    def get_voronoi_polyhedra(self, n):
        """
        Gives a weighted polyhedra around a site. This uses the voronoi
        construction with solid angle weights.
        See ref: A Proposed Rigorous Definition of Coordination Number,
        M. O'Keeffe, Acta Cryst. (1979). A35, 772-775

        Args:
            n (int): Site index

        Returns:
            A dict of sites sharing a common Voronoi facet with the site
            n and their solid angle weights
        """
        center = self._structure[n]
        # Sort candidates by distance so that index 0 is the central site.
        candidates = sorted(
            self._structure.get_sites_in_sphere(center.coords, self.cutoff),
            key=lambda entry: entry[1])
        neighbors = [entry[0] for entry in candidates]
        voro = VoronoiTess([site.coords for site in neighbors])
        vertices = voro.vertices
        angles = {}
        # Only ridges that touch the central point (index 0) are relevant.
        for nn, vind in voro.ridges.items():
            if 0 not in nn:
                continue
            if 0 in vind:
                raise RuntimeError("This structure is pathological,"
                                   " infinite vertex in the voronoi "
                                   "construction")
            facets = [vertices[i] for i in vind]
            angles[neighbors[nn[1]]] = solid_angle(center.coords, facets)
        # Normalize by the largest solid angle; keep only target species.
        max_angle = max(angles.values())
        return {site: angle / max_angle for site, angle in angles.items()
                if site.specie in self._target}

    def get_coordination_number(self, n):
        """
        Returns the coordination number of site with index n.

        Args:
            n (int): Site index
        """
        return sum(self.get_voronoi_polyhedra(n).values())

    def get_coordinated_sites(self, n, tol=0, target=None):
        """
        Returns the sites that are in the coordination radius of site with
        index n.

        Args:
            n (int): Site index.
            tol (float): Weight tolerance to determine if a particular pair is
                considered a neighbor.
            target (Element): Target element

        Returns:
            Sites coordinating input site.
        """
        return [site for site, weight in self.get_voronoi_polyhedra(n).items()
                if weight > tol and (target is None or site.specie == target)]
class RelaxationAnalyzer(object):
    """
    Analyzes the relaxation between an initial and a final structure from a
    calculation: volume, lattice parameter and bond distance changes.
    """
    def __init__(self, initial_structure, final_structure):
        """
        Please note that the input and final structures should have the same
        ordering of sites. This is typically the case for most computational
        codes.

        Args:
            initial_structure (Structure): Initial input structure to
                calculation.
            final_structure (Structure): Final output structure from
                calculation.
        """
        # A differing formula means the two structures cannot correspond
        # site-by-site.
        if initial_structure.formula != final_structure.formula:
            raise ValueError("Initial and final structures have different "
                             "formulas!")
        self.initial = initial_structure
        self.final = final_structure

    def get_percentage_volume_change(self):
        """
        Returns the percentage volume change.

        Returns:
            Volume change in percentage, e.g., 0.055 implies a 5.5% increase.
        """
        return self.final.lattice.volume / self.initial.lattice.volume - 1

    def get_percentage_lattice_parameter_changes(self):
        """
        Returns the percentage lattice parameter changes.

        Returns:
            A dict of the percentage change in lattice parameter, e.g.,
            {'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
            2.1% and -3.1% in the a, b and c lattice parameters respectively.
        """
        changes = {}
        for param in ("a", "b", "c"):
            before = getattr(self.initial.lattice, param)
            after = getattr(self.final.lattice, param)
            changes[param] = after / before - 1
        return changes

    def get_percentage_bond_dist_changes(self, max_radius=3.0):
        """
        Returns the percentage bond distance changes for each site up to a
        maximum radius for nearest neighbors.

        Args:
            max_radius (float): Maximum radius to search for nearest
                neighbors. This radius is applied to the initial structure,
                not the final structure.

        Returns:
            Bond distance changes as a dict of dicts. E.g.,
            {index1: {index2: 0.011, ...}}. For economy of representation, the
            index1 is always less than index2, i.e., since bonding between
            site1 and siten is the same as bonding between siten and site1,
            there is no reason to duplicate the information or computation.
        """
        changes = collections.defaultdict(dict)
        n_sites = len(self.initial)
        # combinations() already yields i < j, matching the documented
        # representation.
        for i, j in itertools.combinations(range(n_sites), 2):
            d_initial = self.initial[i].distance(self.initial[j])
            if d_initial < max_radius:
                d_final = self.final[i].distance(self.final[j])
                changes[i][j] = d_final / d_initial - 1
        return changes
class VoronoiConnectivity(object):
    """
    Computes the solid angles swept out by the shared face of the voronoi
    polyhedron between two sites.

    Args:
        structure (Structure): Input structure
        cutoff (float) Cutoff distance.
    """
    # Radius in Angstrom cutoff to look for coordinating atoms
    def __init__(self, structure, cutoff=10):
        self.cutoff = cutoff
        self.s = structure
        # Number of lattice translations needed along each axis so that the
        # periodic images cover the cutoff sphere.
        recp_len = np.array(self.s.lattice.reciprocal_lattice.abc)
        i = np.ceil(cutoff * recp_len / (2 * math.pi))
        # Symmetric grid of integer offsets in [-i, i] per axis. Because the
        # grid is symmetric, the middle entry of the flattened list
        # (index n_images // 2) is the zero offset, i.e. the original cell.
        offsets = np.mgrid[-i[0]:i[0] + 1, -i[1]:i[1] + 1, -i[2]:i[2] + 1].T
        self.offsets = np.reshape(offsets, (-1, 3))
        #shape = [image, axis]
        self.cart_offsets = self.s.lattice.get_cartesian_coords(self.offsets)
    @property
    def connectivity_array(self):
        """
        Provides connectivity array.

        Returns:
            connectivity: An array of shape [atomi, atomj, imagej]. atomi is
                the index of the atom in the input structure. Since the second
                atom can be outside of the unit cell, it must be described
                by both an atom index and an image index. Array data is the
                solid angle of polygon between atomi and imagej of atomj
        """
        #shape = [site, axis]
        cart_coords = np.array(self.s.cart_coords)
        #shape = [site, image, axis]
        all_sites = cart_coords[:, None, :] + self.cart_offsets[None, :, :]
        vt = VoronoiTess(all_sites.reshape((-1, 3)))
        n_images = all_sites.shape[1]
        cs = (len(self.s), len(self.s), len(self.cart_offsets))
        connectivity = np.zeros(cs)
        vts = np.array(vt.vertices)
        for (ki, kj), v in vt.ridges.items():
            # Flattened point index k encodes (atom, image) as
            # k = atom * n_images + image.
            atomi = ki // n_images
            atomj = kj // n_images
            imagei = ki % n_images
            imagej = kj % n_images
            # Only record ridges where at least one endpoint lies in the
            # original cell (image index n_images // 2, see __init__).
            if imagei != n_images // 2 and imagej != n_images // 2:
                continue
            if imagei == n_images // 2:
                #atomi is in original cell
                val = solid_angle(vt.points[ki], vts[v])
                connectivity[atomi, atomj, imagej] = val
            if imagej == n_images // 2:
                #atomj is in original cell
                val = solid_angle(vt.points[kj], vts[v])
                connectivity[atomj, atomi, imagei] = val
            # The tessellation marks vertices at infinity with the sentinel
            # -10.101; such a ridge means the image supercell was too small.
            if -10.101 in vts[v]:
                warn('Found connectivity with infinite vertex. '
                     'Cutoff is too low, and results may be '
                     'incorrect')
        return connectivity
    @property
    def max_connectivity(self):
        """
        returns the 2d array [sitei, sitej] that represents
        the maximum connectivity of site i to any periodic
        image of site j
        """
        return np.max(self.connectivity_array, axis=2)
    def get_connections(self):
        """
        Returns a list of site pairs that are Voronoi Neighbors, along
        with their real-space distances.
        """
        con = []
        maxconn = self.max_connectivity
        for ii in range(0, maxconn.shape[0]):
            for jj in range(0, maxconn.shape[1]):
                if maxconn[ii][jj] != 0:
                    dist = self.s.get_distance(ii, jj)
                    con.append([ii, jj, dist])
        return con
    def get_sitej(self, site_index, image_index):
        """
        Assuming there is some value in the connectivity array at indices
        (1, 3, 12). sitei can be obtained directly from the input structure
        (structure[1]). sitej can be obtained by passing 3, 12 to this function

        Args:
            site_index (int): index of the site (3 in the example)
            image_index (int): index of the image (12 in the example)
        """
        atoms_n_occu = self.s[site_index].species_and_occu
        lattice = self.s.lattice
        # Translate the site's fractional coords by the requested image offset.
        coords = self.s[site_index].frac_coords + self.offsets[image_index]
        return PeriodicSite(atoms_n_occu, coords, lattice)
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    o = np.array(center)
    # Vectors from the center to each facet vertex, closed into a cycle by
    # re-appending the first vector.
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    # Normals of consecutive vertex-vector pairs; the first pair's normal
    # (cross(r[1], r[0])) is re-appended so the loop below also sees the
    # cyclic (last, first) pair.
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    vals = []
    for i in range(len(n) - 1):
        # Angle between consecutive normals; abs_cap clamps rounding noise
        # into [-1, 1] before acos.
        v = -np.dot(n[i], n[i + 1])\
            / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
        vals.append(math.acos(abs_cap(v)))
    phi = sum(vals)
    # Spherical-excess formula: solid angle = sum of dihedral angles
    # minus (N - 2) * pi for an N-vertex facet (len(r) == N + 1 here).
    return phi + (3 - len(r)) * math.pi
def contains_peroxide(structure, relative_cutoff=1.1):
    """
    Determines if a structure contains peroxide anions.

    Args:
        structure (Structure): Input structure.
        relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
            Relative_cutoff * 1.49 stipulates the maximum distance two O
            atoms must be to each other to be considered a peroxide.

    Returns:
        Boolean indicating if structure contains a peroxide anion.
    """
    # Delegate the classification to oxide_type; the previous if/else that
    # returned True/False explicitly was redundant.
    return oxide_type(structure, relative_cutoff) == "peroxide"
class OxideType(object):
    """
    Separate class for determining oxide type.

    Args:
        structure: Input structure.
        relative_cutoff: Relative_cutoff * act. cutoff stipulates the max.
            distance two O atoms must be from each other. Default value is
            1.1. At most 1.1 is recommended, nothing larger, otherwise the
            script cannot distinguish between superoxides and peroxides.
    """
    def __init__(self, structure, relative_cutoff=1.1):
        self.structure = structure
        self.relative_cutoff = relative_cutoff
        # Classify once on construction; results are cached on the instance.
        self.oxide_type, self.nbonds = self.parse_oxide()
    def parse_oxide(self):
        """
        Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide.

        Returns:
            oxide_type (str): Type of oxide
                ozonide/peroxide/superoxide/hydroxide/None.
            nbonds (int): Number of peroxide/superoxide/hydroxide bonds in
                structure.
        """
        structure = self.structure
        relative_cutoff = self.relative_cutoff
        o_sites_frac_coords = []
        h_sites_frac_coords = []
        lattice = structure.lattice
        # Reduce a Specie-decorated composition to a plain elemental one.
        # NOTE(review): if elements[0] is neither an Element nor a Specie
        # (e.g. a dummy species), comp is never bound and the membership test
        # below raises UnboundLocalError -- confirm upstream guarantees.
        if isinstance(structure.composition.elements[0], Element):
            comp = structure.composition
        elif isinstance(structure.composition.elements[0], Specie):
            elmap = collections.defaultdict(float)
            for site in structure:
                for species, occu in site.species_and_occu.items():
                    elmap[species.element] += occu
            comp = Composition(elmap)
        # No oxygen, or an elemental structure: nothing to classify.
        if Element("O") not in comp or comp.is_element:
            return "None", 0
        # Collect fractional coordinates of all O- and H-bearing sites.
        for site in structure:
            syms = [sp. symbol for sp in site.species_and_occu.keys()]
            if "O" in syms:
                o_sites_frac_coords.append(site.frac_coords)
            if "H" in syms:
                h_sites_frac_coords.append(site.frac_coords)
        # Hydroxide test: any O-H contact below ~0.93 A (scaled).
        if h_sites_frac_coords:
            dist_matrix = lattice.get_all_distances(o_sites_frac_coords,
                                                    h_sites_frac_coords)
            if np.any(dist_matrix < relative_cutoff * 0.93):
                # NOTE(review): this O x H matrix is rectangular, so each O-H
                # contact appears only once; halving the count here looks like
                # it assumes a symmetric matrix -- confirm intended.
                return "hydroxide", len(
                    np.where(dist_matrix < relative_cutoff * 0.93)[0]) / 2.0
        # O-O distances; mask the diagonal with a large value so a site is
        # never considered bonded to itself.
        dist_matrix = lattice.get_all_distances(o_sites_frac_coords,
                                                o_sites_frac_coords)
        np.fill_diagonal(dist_matrix, 1000)
        is_superoxide = False
        is_peroxide = False
        is_ozonide = False
        # Characteristic O-O bond lengths (scaled): superoxide ~1.35 A,
        # peroxide ~1.49 A. bond_atoms holds the row indices of close pairs.
        if np.any(dist_matrix < relative_cutoff * 1.35):
            bond_atoms = np.where(dist_matrix < relative_cutoff * 1.35)[0]
            is_superoxide = True
        elif np.any(dist_matrix < relative_cutoff * 1.49):
            is_peroxide = True
            bond_atoms = np.where(dist_matrix < relative_cutoff * 1.49)[0]
        # In the symmetric O-O matrix each bond contributes two row indices;
        # a repeated index therefore means an O bonded to more than one O,
        # which indicates an ozonide rather than a superoxide.
        if is_superoxide:
            if len(bond_atoms) > len(set(bond_atoms)):
                is_superoxide = False
                is_ozonide = True
        try:
            nbonds = len(set(bond_atoms))
        except UnboundLocalError:
            # No short O-O contacts were found: plain oxide (count set below).
            nbonds = 0.0
        if is_ozonide:
            str_oxide = "ozonide"
        elif is_superoxide:
            str_oxide = "superoxide"
        elif is_peroxide:
            str_oxide = "peroxide"
        else:
            str_oxide = "oxide"
        if str_oxide == "oxide":
            # For a normal oxide, report the oxygen content instead.
            nbonds = comp["O"]
        return str_oxide, nbonds
def oxide_type(structure, relative_cutoff=1.1, return_nbonds=False):
    """
    Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide

    Args:
        structure (Structure): Input structure.
        relative_cutoff (float): Relative_cutoff * act. cutoff stipulates the
            max distance two O atoms must be from each other.
        return_nbonds (bool): Should number of bonds be requested?
    """
    # Thin convenience wrapper around the OxideType classifier.
    classifier = OxideType(structure, relative_cutoff)
    if return_nbonds:
        return classifier.oxide_type, classifier.nbonds
    return classifier.oxide_type
| |
import datetime
import pandas as pd
from yatt.ticker import aapl
from yatt.event import Event
from yatt.event import MarketEvent
from yatt.event import TickEvent
from yatt.event import BarEvent
from yatt.event import HistoricalDataEvent
from yatt.event import HistoricalTickEvent
from yatt.event import HistoricalBarEvent
from yatt.event import ActionEvent
from yatt.event import SignalEvent
from yatt.event import OrderEvent
from yatt.event import FillEvent
# Shared fixtures: every test below uses the AAPL ticker and this timestamp.
ticker = aapl
timestamp = datetime.datetime(2000, 1, 1)
def test_event():
    """Base Event exposes type, ticker, timestamp and stable str()/repr()."""
    event = Event(timestamp=timestamp, ticker=ticker)
    assert event.type == Event
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL'
    # repr() is the idiomatic way to invoke __repr__.
    assert repr(event) == '<Event: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL>'
def test_market_event():
    """MarketEvent records its data and pushes the timestamp to the ticker."""
    event = MarketEvent(timestamp=timestamp, ticker=ticker)
    assert event.type == MarketEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert ticker.last_timestamp == timestamp
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL'
    assert repr(event) == '<MarketEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL>'
def test_tick_event():
    """TickEvent stores bid/ask data, updates the ticker and exports a frame."""
    event = TickEvent(timestamp=timestamp, ticker=ticker, volume_bid=101, bid=11, ask=12, volume_ask=102)
    assert event.type == TickEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert ticker.last_timestamp == timestamp
    # The tick's mid-price becomes the ticker's last value.
    assert ticker.last_value == (event.bid + event.ask) / 2
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, ' \
                         'Volume Bid: 101, Bid: 11, Ask: 12, Volume Ask: 102'
    assert repr(event) == '<TickEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL,' \
                          ' Volume Bid: 101, Bid: 11, Ask: 12, Volume Ask: 102>'
    assert event.to_frame().equals(pd.DataFrame([{'timestamp': timestamp, 'ticker': ticker.symbol,
                                                  'volume_bid': 101, 'bid': 11, 'ask': 12, 'volume_ask': 102}]
                                                ).set_index(['timestamp', 'ticker']))
def test_bar_event():
    """BarEvent uses close as last value, or adj_close when provided."""
    event = BarEvent(timestamp=timestamp, ticker=ticker, period=60,
                     open_price=101, high_price=103, low_price=100, close_price=102,
                     volume=1000)
    # Without an adjusted close the raw close drives the ticker.
    assert ticker.last_value == event.close_price
    event = BarEvent(timestamp=timestamp, ticker=ticker, period=60,
                     open_price=101, high_price=103, low_price=100, close_price=102,
                     volume=1000, adj_close_price=51)
    assert event.type == BarEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert ticker.last_timestamp == timestamp
    # With an adjusted close, that value takes precedence.
    assert ticker.last_value == event.adj_close_price
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, ' \
                         'Period: 60, Open: 101, High: 103, Low: 100, Close: 102, Adj Close: 51, Volume: 1000'
    assert repr(event) == '<BarEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, ' \
                          'Period: 60, Open: 101, High: 103, Low: 100, Close: 102, Adj Close: 51, Volume: 1000>'
    assert event.to_frame().equals(pd.DataFrame([{'timestamp': timestamp, 'ticker': ticker.symbol, 'open': 101, 'high': 103,
                                                  'low': 100, 'close': 102, 'volume': 1000, 'adj_close': 51}]
                                                ).set_index(['timestamp', 'ticker']))
# Shared two-row fixtures for the historical-event tests below.
# Months/days are written as plain decimal integers; the previous octal
# literals (0o1, 0o2) had the same values but were needlessly confusing.
bar_df = pd.DataFrame([{'timestamp': datetime.datetime(2000, 1, 1), 'ticker': ticker.symbol,
                        'open': 101, 'high': 103, 'low': 100, 'close': 102, 'adj_close': 102, 'volume': 10000},
                       {'timestamp': datetime.datetime(2000, 1, 2), 'ticker': ticker.symbol,
                        'open': 102, 'high': 104, 'low': 101, 'close': 103, 'adj_close': 103, 'volume': 10001}],
                      ).set_index(['timestamp', 'ticker'])
tick_df = pd.DataFrame([{'timestamp': datetime.datetime(2000, 1, 1), 'ticker': ticker.symbol,
                         'volume_bid': 101, 'bid': 11, 'ask': 12, 'volume_ask': 102},
                        {'timestamp': datetime.datetime(2000, 1, 2), 'ticker': ticker.symbol,
                         'volume_bid': 201, 'bid': 21, 'ask': 22, 'volume_ask': 202}
                        ]).set_index(['timestamp', 'ticker'])
def test_historical_data_event():
    """HistoricalDataEvent derives timestamp/start/end/length from the frame."""
    event = HistoricalDataEvent(ticker=ticker, data_frame=bar_df)
    assert event.type == HistoricalDataEvent
    assert event.ticker == ticker
    # The event timestamp is the last row's timestamp.
    assert event.timestamp == datetime.datetime(2000, 1, 2)
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 2)
    assert event.length == 2
def test_historical_tick_event():
    """HistoricalTickEvent exposes frame bounds and upserts ticks in place."""
    event = HistoricalTickEvent(ticker=ticker, data_frame=tick_df)
    assert event.type == HistoricalTickEvent
    assert event.ticker == ticker
    assert event.timestamp == datetime.datetime(2000, 1, 2)
    assert repr(event) == '<HistoricalTickEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Volume Bid: 101, Bid: 11, Ask: 12, Volume Ask: 102 \n' \
                          'Last row: Timestamp: 2000-01-02 00:00:00, Volume Bid: 201, Bid: 21, Ask: 22, Volume Ask: 202>'
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 2)
    assert event.length == 2
    # Upsert with an existing timestamp replaces the row in place.
    tick = TickEvent(timestamp=datetime.datetime(2000, 1, 2), ticker=ticker,
                     volume_bid=301, bid=31, ask=32, volume_ask=302)
    event.upsert(tick)
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 2)
    assert event.length == 2
    assert repr(event) == '<HistoricalTickEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Volume Bid: 101, Bid: 11, Ask: 12, Volume Ask: 102 \n' \
                          'Last row: Timestamp: 2000-01-02 00:00:00, Volume Bid: 301, Bid: 31, Ask: 32, Volume Ask: 302>'
    # Upsert with a new timestamp appends a row.
    tick = TickEvent(timestamp=datetime.datetime(2000, 1, 3), ticker=ticker,
                     volume_bid=401, bid=41, ask=42, volume_ask=402)
    event.upsert(tick)
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 3)
    assert event.length == 3
    assert repr(event) == '<HistoricalTickEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Volume Bid: 101, Bid: 11, Ask: 12, Volume Ask: 102 \n' \
                          'Last row: Timestamp: 2000-01-03 00:00:00, Volume Bid: 401, Bid: 41, Ask: 42, Volume Ask: 402>'
def test_historical_bar_event():
    """HistoricalBarEvent exposes frame bounds and upserts bars in place."""
    event = HistoricalBarEvent(ticker=ticker, data_frame=bar_df)
    assert event.type == HistoricalBarEvent
    assert event.ticker == ticker
    assert event.timestamp == datetime.datetime(2000, 1, 2)
    assert repr(event) == '<HistoricalBarEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Open: 101, High: 103, Low: 100,' \
                          ' Close: 102, Adj Close: 102, Volume: 10000 \n' \
                          'Last row: Timestamp: 2000-01-02 00:00:00, Open: 102, High: 104, Low: 101,' \
                          ' Close: 103, Adj Close: 103, Volume: 10001>'
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 2)
    assert event.length == 2
    # Upsert with an existing timestamp replaces the row in place.
    bar = BarEvent(timestamp=datetime.datetime(2000, 1, 2), ticker=ticker, period=60,
                   open_price=103, high_price=105, low_price=102, close_price=104,
                   volume=10002, adj_close_price=104)
    event.upsert(bar)
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 2)
    assert event.length == 2
    assert repr(event) == '<HistoricalBarEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Open: 101, High: 103, Low: 100,' \
                          ' Close: 102, Adj Close: 102, Volume: 10000 \n' \
                          'Last row: Timestamp: 2000-01-02 00:00:00, Open: 103, High: 105, Low: 102,' \
                          ' Close: 104, Adj Close: 104, Volume: 10002>'
    # Upsert with a new timestamp appends a row.
    bar = BarEvent(timestamp=datetime.datetime(2000, 1, 3), ticker=ticker, period=60,
                   open_price=104, high_price=106, low_price=103, close_price=105,
                   volume=10003, adj_close_price=105)
    event.upsert(bar)
    assert event.start == datetime.datetime(2000, 1, 1)
    assert event.end == datetime.datetime(2000, 1, 3)
    assert event.length == 3
    assert repr(event) == '<HistoricalBarEvent: Timestamp: 2000-01-02 00:00:00, Ticker: AAPL\n' \
                          'First row: Timestamp: 2000-01-01 00:00:00, Open: 101, High: 103, Low: 100,' \
                          ' Close: 102, Adj Close: 102, Volume: 10000 \n' \
                          'Last row: Timestamp: 2000-01-03 00:00:00, Open: 104, High: 106, Low: 103,' \
                          ' Close: 105, Adj Close: 105, Volume: 10003>'
def test_action_event():
    """ActionEvent carries an action string alongside the base fields."""
    event = ActionEvent(timestamp=timestamp, ticker=ticker, action='BUY')
    assert event.type == ActionEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert event.action == 'BUY'
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY'
    assert repr(event) == '<ActionEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY>'
def test_signal_event():
    """SignalEvent adds a suggested quantity to the action fields."""
    event = SignalEvent(timestamp=timestamp, ticker=ticker, action='BUY', suggested_quantity=100)
    assert event.type == SignalEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert event.action == 'BUY'
    assert event.suggested_quantity == 100
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY, Suggested Quantity: 100'
    assert repr(event) == '<SignalEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, ' \
                          'Action: BUY, Suggested Quantity: 100>'
def test_orderevent():
    """OrderEvent carries a concrete quantity to be executed."""
    event = OrderEvent(timestamp=timestamp, ticker=ticker, action='BUY', quantity=100)
    assert event.type == OrderEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert event.action == 'BUY'
    assert event.quantity == 100
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY, Quantity: 100'
    assert repr(event) == '<OrderEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY, Quantity: 100>'
def test_fillevent():
    """FillEvent records execution price, exchange and commission."""
    event = FillEvent(timestamp=timestamp, ticker=ticker,
                      action='BUY', quantity=100, price=130, exchange='NYSE', commission=1.2)
    assert event.type == FillEvent
    assert event.ticker == ticker
    assert event.timestamp == timestamp
    assert event.action == 'BUY'
    assert event.quantity == 100
    assert event.price == 130
    assert event.exchange == 'NYSE'
    assert event.commission == 1.2
    assert str(event) == 'Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY, Quantity: 100, ' \
                         'Price: 130, Exchange: NYSE, Commission: 1.2'
    assert repr(event) == '<FillEvent: Timestamp: 2000-01-01 00:00:00, Ticker: AAPL, Action: BUY, ' \
                          'Quantity: 100, Price: 130, Exchange: NYSE, Commission: 1.2>'
| |
"""
Programmatic access to the information of the ISO-639-3 standard
ISO-639-3 data is not distributed with this package, but we fetch the download listed at
`<https://iso639-3.sil.org/code_tables/download_tables>`_
"""
import re
import string
import typing
import datetime
import functools
import collections
import urllib.request
from csvw.dsv import iterrows
from clldutils.path import TemporaryDirectory, Path
from clldutils.ziparchive import ZipArchive
BASE_URL = "https://iso639-3.sil.org/"
# Relative path of the "Complete Set of Tables" zip as it appears (quoted)
# in the download page's HTML.
ZIP_NAME_PATTERN = re.compile(
    r'(?P<name>sites/iso639-3/files/downloads/iso-639-3_Code_Tables_[0-9]{8}.zip)"')
# Table file names inside the zip, e.g. "/iso-639-3_Name_Index_20200130.tab".
TABLE_NAME_PATTERN = re.compile(r'/iso-639-3(?P<name_and_date>[^.]*)\.tab')
# YYYYMMDD date stamps (years 2000-2999).
DATESTAMP_PATTERN = re.compile(r'(2[0-9]{3})([0-1][0-9])([0-3][0-9])')
USER_AGENT = 'Mozilla' # It seems a python user-agent doesn't cut it anymore.
# For some reason, the retirements code table gives the wrong replacement codes in two
# cases (although they are described correctly on the website):
CHANGE_TO_ERRATA = {
    'guv': ['duz'],
    'ymt': ['mtm'],
}
def _open(path):
    """Open *path* below BASE_URL, sending a browser-like User-Agent."""
    request = urllib.request.Request(
        BASE_URL + path, headers={'User-Agent': USER_AGENT})
    return urllib.request.urlopen(request)
class Table(list):
    """
    One code table from the ISO 639-3 download, parsed into row dicts.

    :ivar str name: Table name, e.g. "Codes" or "Retirements".
    :ivar datetime.date date: Date stamp parsed from the file name.
    """
    def __init__(self, name_and_date, date, fp):
        parts = name_and_date.split('_')
        # The ISO 639-3 code tables from 2020-05-15 contain a table with a
        # malformed name - having an excess "0" in the date stamp.
        if parts[-1] == '202000515':  # pragma: no cover
            date = '20200515'
        self.date = datetime.date(
            *map(int, DATESTAMP_PATTERN.match(date).groups()))
        # The name is whatever remains once date-stamp parts are dropped.
        name = '_'.join(p for p in parts if not DATESTAMP_PATTERN.match(p))
        if name.startswith(('_', '-')):
            name = name[1:]
        self.name = name or 'Codes'
        rows = [line for line in fp.splitlines() if line.strip()]  # strip malformed lines.
        super(Table, self).__init__(iterrows(rows, dicts=True, delimiter='\t'))
def download_tables(outdir=None):
    """
    Download the current "Complete Set of Tables" zip into *outdir*.

    :param outdir: Target directory (defaults to the current directory).
    :return: Path of the downloaded zip file.
    """
    listing = _open('code_tables/download_tables').read().decode('utf-8-sig')
    match = ZIP_NAME_PATTERN.search(listing)
    if not match:
        raise ValueError('no matching zip file name found')  # pragma: no cover
    zipname = match.group('name')
    target = Path(outdir or '.').joinpath(zipname.split('/')[-1])
    with target.open('wb') as fp:
        fp.write(_open(zipname).read())
    return target
def iter_tables(zippath=None):
    """
    Yield a :class:`Table` for each .tab file found in the zip archive,
    downloading the archive first when *zippath* is not given.
    """
    with TemporaryDirectory() as tmp:
        zippath = zippath or download_tables(tmp)
        with ZipArchive(zippath) as archive:
            for name in archive.namelist():
                stamp = DATESTAMP_PATTERN.search(name)
                date = name[stamp.start():stamp.end()]
                table = TABLE_NAME_PATTERN.search(name)
                if table:
                    yield Table(
                        table.group('name_and_date'), date, archive.read_text(name))
@functools.total_ordering
class Code(object):
    """
    Represents one ISO 639-3 code and its associated metadata.

    :ivar str code: The three-letter code
    :ivar str name: The language name
    """
    # Matches bracketed codes such as "[eng]" in free-text remedy fields.
    _code_pattern = re.compile(r'\[([a-z]{3})]')
    # One-letter scope flags from the Codes table.
    _scope_map = {
        'I': 'Individual',
        'M': 'Macrolanguage',
        'S': 'Special',
    }
    # One-letter language-type flags from the Codes table.
    _type_map = {
        'L': 'Living',
        'E': 'Extinct',
        'A': 'Ancient',
        'H': 'Historical',
        'C': 'Constructed',
        'S': 'Special',
    }
    # One-letter retirement reasons from the Retirements table.
    _rtype_map = {
        'C': 'change',
        'D': 'duplicate',
        'N': 'non-existent',
        'S': 'split',
        'M': 'merge',
    }
    def __init__(self, item, tablename, registry):
        """
        :param item: Row dict from one of the code tables.
        :param tablename: Table the row came from: 'Codes', 'Retirements' \
        or 'Local'.
        :param registry: The ISO mapping this code belongs to; used to \
        resolve cross-references between codes lazily.
        """
        code = item['Id']
        self._change_to = []
        self.retired = False
        if tablename == 'Codes':
            self._scope = self._scope_map[item['Scope']]
            self._type = self._type_map[item['Language_Type']]
        elif tablename == 'Retirements':
            self._scope = 'Retirement'
            self._type = self._rtype_map[item['Ret_Reason']] if item['Ret_Reason'] else None
            # Retirement date, e.g. '2008-01-14'.
            self.retired = datetime.date(*map(int, item['Effective'].split('-')))
            if code in CHANGE_TO_ERRATA:
                # Known-wrong replacement codes in the table; see module header.
                self._change_to = CHANGE_TO_ERRATA[code]  # pragma: no cover
            else:
                if item['Change_To']:
                    assert item['Change_To'] != code
                    self._change_to = [item['Change_To']]
                elif item['Ret_Remedy']:
                    # Split retirements describe successors only in prose;
                    # extract the bracketed codes from the remedy text.
                    self._change_to = [
                        c for c in self._code_pattern.findall(item['Ret_Remedy'])
                        if c != code]
        elif tablename == 'Local':
            self._scope = 'Local'
            self._type = 'Special'
        else:
            raise ValueError(tablename)  # pragma: no cover
        self.code = code
        self.name = item['Ref_Name']
        self._registry = registry
    @property
    def type(self) -> str:
        """
        Combined "scope/type" label, e.g. "Individual/Living".
        """
        return '{0}/{1}'.format(self._scope, self._type)
    @property
    def is_retired(self) -> bool:
        """
        Flag signaling whether the code is retired.
        """
        return bool(self.retired)
    @property
    def change_to(self) -> typing.List['Code']:
        """
        List of codes that supersede a retired code.
        """
        res = []
        for code in self._change_to:
            code = self._registry[code]
            if not code.is_retired:
                res.append(code)
            else:
                # Follow chains of retirements down to the active codes.
                res.extend(code.change_to)
        return res
    @property
    def is_local(self) -> bool:
        """
        Flag signaling whether the code is in the private use area.
        """
        return self._scope == 'Local'
    @property
    def is_macrolanguage(self) -> bool:
        return self._scope == 'Macrolanguage'
    @property
    def extension(self) -> typing.List['Code']:
        """
        The codes subsumed by a macrolanguage code.
        """
        if self.is_macrolanguage:
            return [self._registry[c] for c in self._registry._macrolanguage[self.code]]
        return []
    # Identity, equality and ordering are all based on the three-letter code.
    def __hash__(self):
        return hash(self.code)
    def __eq__(self, other):
        return self.code == other.code
    def __lt__(self, other):
        return self.code < other.code
    def __repr__(self):
        return '<ISO-639-3 [{0}] {1}>'.format(self.code, self.type)
    def __str__(self):
        return '{0} [{1}]'.format(self.name, self.code)
class ISO(collections.OrderedDict):
    """
    Provides access to the content of ISO 639-3's downloadable code table.

    An `ISO` instance maps three-letter codes to :class:`Code` instances, and provides a couple
    of convenience methods.
    """
    def __init__(self, zippath=None):
        """
        :param zippath: Path to a local copy of the "Complete Set of Tables" (UTF-8). If `None`, \
        the tables will be retrieved from the web.
        """
        self._tables = {t.name: t for t in iter_tables(zippath=zippath)}
        # Prefer the date stamp in the zip's file name; fall back to the
        # newest date stamp found among the tables.
        # NOTE(review): zippath is accessed via .name, i.e. assumed to be a
        # Path-like object rather than a plain string -- confirm callers.
        if zippath and DATESTAMP_PATTERN.search(zippath.name):
            digits = map(int, DATESTAMP_PATTERN.search(zippath.name).groups())
            self.date = datetime.date(*digits)
        else:
            self.date = max(t.date for t in self._tables.values())
        # Maps macrolanguage code -> list of individual language codes.
        self._macrolanguage = collections.defaultdict(list)
        for item in self._tables['macrolanguages']:
            self._macrolanguage[item['M_Id']].append(item['I_Id'])
        super(ISO, self).__init__()
        for tablename in ['Codes', 'Retirements']:
            for item in self._tables[tablename]:
                if item['Id'] not in self:
                    # Note: we don't keep historical retirements, i.e. ones that have only
                    # been in effect for some time. E.g. lcq has been changed to ppr
                    # from 2012-02-03 until 2013-01-23 when it was changed back to lcq
                    self[item['Id']] = Code(item, tablename, self)
        # Add the local-use range qaa-qtz, which is not listed in the tables.
        for code in ['q' + x + y
                     for x in string.ascii_lowercase[:string.ascii_lowercase.index('t') + 1]
                     for y in string.ascii_lowercase]:
            self[code] = Code(dict(Id=code, Ref_Name=None), 'Local', self)
    def __str__(self):
        return 'ISO 639-3 code tables from {0}'.format(self.date)
    def by_type(self, type_):
        # Generic filter over the Code._type attribute.
        return [c for c in self.values() if c._type == type_]
    @property
    def living(self) -> typing.List[Code]:
        """
        All codes categorized as "Living"
        """
        return self.by_type('Living')
    @property
    def extinct(self) -> typing.List[Code]:
        """
        All codes categorized as "Extinct"
        """
        return self.by_type('Extinct')
    @property
    def ancient(self) -> typing.List[Code]:
        """
        All codes categorized as "Ancient"
        """
        return self.by_type('Ancient')
    @property
    def historical(self) -> typing.List[Code]:
        """
        All codes categorized as "Historical"
        """
        return self.by_type('Historical')
    @property
    def constructed(self) -> typing.List[Code]:
        """
        All codes categorized as "Constructed"
        """
        return self.by_type('Constructed')
    @property
    def special(self) -> typing.List[Code]:
        """
        All codes categorized as "Special"
        """
        return self.by_type('Special')
    @property
    def retirements(self) -> typing.List[Code]:
        """
        All retired codes
        """
        return [c for c in self.values() if c.is_retired]
    @property
    def macrolanguages(self) -> typing.List[Code]:
        """
        All macrolanguage codes
        """
        return [c for c in self.values() if c.is_macrolanguage]
    @property
    def languages(self) -> typing.List[Code]:
        """
        All active language codes
        """
        return [c for c in self.values()
                if not c.is_macrolanguage and not c.is_retired and not c.is_local]
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _py2tmp.compiler.testing import main, assert_compilation_succeeds, assert_conversion_fails
# --- Success cases: plain, pointer and (collapsed) reference type literals. ---
# NOTE: in this suite the test bodies are compiled by the tmppy framework; the
# asserted constructs (not the module-level code) are what is under test.
@assert_compilation_succeeds()
def test_type_literal_success():
    from tmppy import Type
    assert Type('int') == Type('int')

@assert_compilation_succeeds()
def test_type_pointer_literal_success():
    from tmppy import Type
    assert Type.pointer(Type('int')) == Type.pointer(Type('int'))

@assert_compilation_succeeds()
def test_type_reference_literal_success():
    from tmppy import Type
    assert Type.reference(Type('int')) == Type.reference(Type('int'))

@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True)
def test_type_reference_literal_collapsed():
    from tmppy import Type
    assert Type.reference(Type('int')) == Type.reference(Type.reference(Type('int')))

@assert_compilation_succeeds()
def test_type_rvalue_reference_literal_success():
    from tmppy import Type
    assert Type.rvalue_reference(Type('int')) == Type.rvalue_reference(Type('int'))

@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True)
def test_type_rvalue_reference_literal_different_from_two_references_success():
    from tmppy import Type
    assert Type.rvalue_reference(Type('int')) != Type.reference(Type.reference(Type('int')))

@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True)
def test_type_rvalue_reference_literal_collapsed():
    from tmppy import Type
    assert Type.rvalue_reference(Type('int')) == Type.rvalue_reference(Type.rvalue_reference(Type('int')))

@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True)
def test_type_rvalue_reference_literal_collapsed_with_reference():
    from tmppy import Type
    assert Type.reference(Type('int')) == Type.rvalue_reference(Type.reference(Type('int')))
@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True)
def test_type_rvalue_reference_literal_collapsed_with_reference_reverse_order():
    from tmppy import Type
    # Bug fix: this test was a byte-for-byte duplicate of
    # test_type_rvalue_reference_literal_collapsed_with_reference and never
    # exercised the "reverse order" nesting its name promises. Reference
    # collapsing also applies with the lvalue reference outermost:
    # (T&&)& collapses to T&.
    assert Type.reference(Type('int')) == Type.reference(Type.rvalue_reference(Type('int')))
# --- Success cases: const, array and function(-pointer) type literals. ---
@assert_compilation_succeeds()
def test_const_type_literal_success():
    from tmppy import Type
    assert Type.const(Type('int')) == Type.const(Type('int'))

@assert_compilation_succeeds()
def test_type_array_literal_success():
    from tmppy import Type
    assert Type.array(Type('int')) == Type.array(Type('int'))

@assert_compilation_succeeds()
def test_type_function_literal_with_no_args_success():
    from tmppy import Type, empty_list
    assert Type.function(Type('int'), empty_list(Type)) == Type.function(Type('int'), empty_list(Type))

@assert_compilation_succeeds()
def test_type_function_pointer_literal_with_no_args_success():
    from tmppy import Type, empty_list
    assert Type.pointer(Type.function(Type('int'), empty_list(Type))) == Type.pointer(Type.function(Type('int'), empty_list(Type)))

@assert_compilation_succeeds()
def test_type_function_literal_success():
    from tmppy import Type
    assert Type.function(Type('int'), [Type('float'), Type('double')]) == Type.function(Type('int'), [Type('float'), Type('double')])

@assert_compilation_succeeds()
def test_type_function_pointer_literal_success():
    from tmppy import Type
    assert Type.pointer(Type.function(Type('int'), [Type('float'), Type('double')])) == Type.pointer(Type.function(Type('int'), [Type('float'), Type('double')]))
# --- Failure cases: wrong arity / argument kinds for Type(...).
# The trailing comments are regexes matched by assert_conversion_fails against
# the compiler diagnostic; they are part of the test contract.
@assert_conversion_fails
def test_type_literal_no_arguments_error():
    from tmppy import Type
    def f(x: bool):
        return Type()  # error: Type\(\) takes 1 argument. Got: 0

@assert_conversion_fails
def test_type_literal_too_many_arguments_error():
    from tmppy import Type
    def f(x: bool):
        return Type('', '')  # error: Type\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_literal_argument_with_wrong_type_error():
    from tmppy import Type
    def f(x: bool):
        return Type(x)  # error: The argument passed to Type should be a string constant.

@assert_conversion_fails
def test_type_literal_kwargs_arg_not_supported():
    from tmppy import Type
    def f(x: bool):
        y = 1
        return Type(**y)  # error: Keyword arguments are not supported in Type\(\)

@assert_conversion_fails
def test_type_literal_keyword_arg_error():
    from tmppy import Type
    def f(x: bool):
        return Type('T', T=Type('int'))  # error: Keyword arguments are not supported in Type\(\)
# --- Type.template_instantiation / Type.template_member tests. ---
@assert_compilation_succeeds()
def test_template_instantiation_literal_success():
    from tmppy import Type
    assert Type.pointer(Type('int')) == Type.template_instantiation('std::add_pointer', [Type('int')]).type

@assert_compilation_succeeds(always_allow_toplevel_static_asserts_after_optimization=True, extra_cpp_prelude='''\
struct Holder {
template <typename T, typename U>
struct Inner {
using type = U*;
};
};
''')
def test_template_member_literal_success():
    from tmppy import Type
    assert Type.pointer(Type('float')) == Type.template_member(Type('Holder'), 'Inner', [Type('int'), Type('float')]).type

@assert_conversion_fails
def test_template_member_keyword_arg():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(
            type=Type('int'))  # error: Keyword arguments are not supported in Type.template_member\(\)

@assert_conversion_fails
def test_template_member_literal_no_args_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member()  # error: Type.template_member\(\) takes 3 arguments. Got: 0

@assert_conversion_fails
def test_template_member_literal_one_arg_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'))  # error: Type.template_member\(\) takes 3 arguments. Got: 1

@assert_conversion_fails
def test_template_member_literal_two_args_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'), 'foo')  # error: Type.template_member\(\) takes 3 arguments. Got: 2

@assert_conversion_fails
def test_template_member_literal_four_args_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'), 'foo', 'bar', 'baz')  # error: Type.template_member\(\) takes 3 arguments. Got: 4

@assert_conversion_fails
def test_template_member_literal_first_arg_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(
            4,  # error: The first argument passed to Type.template_member should have type Type, but was: int
            'foo', [])

@assert_conversion_fails
def test_template_member_literal_second_arg_incorrect_kind():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'),
                                    15,  # error: The second argument passed to Type.template_member should be a string
                                    [])

@assert_conversion_fails
def test_template_member_literal_second_arg_not_an_identifier():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'),
                                    '1abc',  # error: The second argument passed to Type.template_member should be a valid C\+\+ identifier
                                    [])

@assert_conversion_fails
def test_template_member_literal_third_arg_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.template_member(Type('int'), 'foo',
                                    15)  # error: The third argument passed to Type.template_member should have type List\[Type\], but was: int
# --- Atomic-type validation and Type.template_instantiation argument checks. ---
@assert_compilation_succeeds(extra_cpp_prelude='''\
#include <string>
''')
def test_type_literal_qualified_ok():
    from tmppy import Type
    def f(x: bool):
        return Type('std::string')

@assert_conversion_fails
def test_type_literal_not_atomic_error():
    from tmppy import Type
    def f(x: bool):
        return Type('std::vector<bool>')  # error: Invalid atomic type. Atomic types should be C\+\+ identifiers \(possibly namespace-qualified\).

@assert_conversion_fails
def test_template_instantiation_literal_not_atomic_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation('std::dummy<int>::add_pointer', [Type('int')])  # error: Invalid atomic type. Atomic types should be C\+\+ identifiers \(possibly namespace-qualified\).

@assert_conversion_fails
def test_template_instantiation_literal_keyword_arg_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation(
            template_atomic_type='std::dummy<int>::add_pointer',  # error: Keyword arguments are not supported in Type.template_instantiation\(\)
            args=[Type('int')])

@assert_conversion_fails
def test_template_instantiation_literal_no_args_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation()  # error: Type.template_instantiation\(\) takes 2 arguments. Got: 0

@assert_conversion_fails
def test_template_instantiation_literal_one_arg_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation('int')  # error: Type.template_instantiation\(\) takes 2 arguments. Got: 1

@assert_conversion_fails
def test_template_instantiation_literal_three_arg_error():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation('F<X, Y>', Type('int'), Type('float'))  # error: Type.template_instantiation\(\) takes 2 arguments. Got: 3

@assert_conversion_fails
def test_template_instantiation_literal_incorrect_first_arg_type():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation(
            1,  # error: The first argument passed to Type.template_instantiation should be a string
            [Type('int')])

@assert_conversion_fails
def test_template_instantiation_literal_incorrect_second_arg_type():
    from tmppy import Type
    def f(x: bool):
        return Type.template_instantiation(
            'std::vector',
            Type('int'))  # error: The second argument passed to Type.template_instantiation should have type List\[Type\], but was: Type
# --- Argument validation for the single-argument Type factories
# (pointer / reference / rvalue_reference / const / array) and for
# Type.function, plus the unknown-factory-method diagnostic. ---
@assert_conversion_fails
def test_type_pointer_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.pointer(x=1)  # error: Keyword arguments are not supported in Type.pointer\(\)

@assert_conversion_fails
def test_type_pointer_literal_with_multiple_args():
    from tmppy import Type
    def f(x: bool):
        return Type.pointer('x', 'y')  # error: Type.pointer\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_pointer_literal_with_arg_of_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.pointer(5)  # error: The argument passed to Type.pointer\(\) should have type Type, but was: int

@assert_conversion_fails
def test_type_reference_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.reference(x=1)  # error: Keyword arguments are not supported in Type.reference\(\)

@assert_conversion_fails
def test_type_reference_literal_with_multiple_args():
    from tmppy import Type
    def f(x: bool):
        return Type.reference('x', 'y')  # error: Type.reference\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_reference_literal_with_arg_of_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.reference(5)  # error: The argument passed to Type.reference\(\) should have type Type, but was: int

@assert_conversion_fails
def test_type_rvalue_reference_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.rvalue_reference(x=1)  # error: Keyword arguments are not supported in Type.rvalue_reference\(\)

@assert_conversion_fails
def test_type_rvalue_reference_literal_with_multiple_args():
    from tmppy import Type
    def f(x: bool):
        return Type.rvalue_reference('x', 'y')  # error: Type.rvalue_reference\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_rvalue_reference_literal_with_arg_of_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.rvalue_reference(5)  # error: The argument passed to Type.rvalue_reference\(\) should have type Type, but was: int

@assert_conversion_fails
def test_type_const_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.const(x=1)  # error: Keyword arguments are not supported in Type.const\(\)

@assert_conversion_fails
def test_type_const_literal_with_multiple_args():
    from tmppy import Type
    def f(x: bool):
        return Type.const('x', 'y')  # error: Type.const\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_const_literal_with_arg_of_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.const(5)  # error: The argument passed to Type.const\(\) should have type Type, but was: int

@assert_conversion_fails
def test_type_array_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.array(x=1)  # error: Keyword arguments are not supported in Type.array\(\)

@assert_conversion_fails
def test_type_array_literal_with_multiple_args():
    from tmppy import Type
    def f(x: bool):
        return Type.array('x', 'y')  # error: Type.array\(\) takes 1 argument. Got: 2

@assert_conversion_fails
def test_type_array_literal_with_arg_of_incorrect_type():
    from tmppy import Type
    def f(x: bool):
        return Type.array(5)  # error: The argument passed to Type.array\(\) should have type Type, but was: int

@assert_conversion_fails
def test_type_function_literal_with_too_few_args():
    from tmppy import Type
    def f(x: bool):
        return Type.function('int')  # error: Type.function\(\) takes 2 arguments. Got: 1

@assert_conversion_fails
def test_type_function_literal_with_too_many_args():
    from tmppy import Type
    def f(x: bool):
        return Type.function('int', 'float', 'char')  # error: Type.function\(\) takes 2 arguments. Got: 3

@assert_conversion_fails
def test_type_function_literal_with_keyword_argument():
    from tmppy import Type
    def f(x: bool):
        return Type.function(x=1)  # error: Keyword arguments are not supported in Type.function\(\)

@assert_conversion_fails
def test_type_function_literal_incorrect_first_arg_type():
    from tmppy import Type
    def f(x: bool):
        return Type.function(1, [])  # error: The first argument passed to Type.function should have type Type, but was: int

@assert_conversion_fails
def test_type_function_literal_incorrect_second_arg_type():
    from tmppy import Type
    def f(x: bool):
        return Type.function(Type('double'), Type('int'))  # error: The second argument passed to Type.function should have type List\[Type\], but was: Type

@assert_conversion_fails
def test_type_undefined_static_method():
    from tmppy import Type
    def f(x: bool):
        return Type.i_do_not_exist()  # error: Undefined Type factory method
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| |
import copy
import logging
import math
import os
import torch
import torch.nn as nn
from nmmt.models import Translation
from nmmt.IDataset import DatasetWrapper
from nmmt.SubwordTextProcessor import SubwordTextProcessor
from nmmt.internal_utils import opts_object, log_timed_action
from nmmt.torch_utils import torch_is_multi_gpu, torch_is_using_cuda, torch_get_gpus
from onmt import Models, Translator, Constants, Dataset, Optim
class _Translator(Translator):
    """Adapter over onmt.Translator that reuses an already-loaded model.

    The parent constructor is intentionally skipped: it would load a
    checkpoint itself, whereas here the dictionaries and the model are
    injected directly by NMTEngine.
    """

    def __init__(self, src_dict, trg_dict, model):
        # Super constructor MUST NOT be invoked
        # super(_Translator, self).__init__(None)

        # Minimal options object mimicking the CLI opts Translator expects.
        self.opt = opts_object()
        self.opt.alignment = True
        self.opt.batch_size = 32
        self.opt.cuda = torch_is_using_cuda()
        # Tensor namespace: torch.cuda when running on GPU, plain torch otherwise.
        self.tt = torch.cuda if self.opt.cuda else torch
        self.beam_accum = None
        self.src_dict = src_dict
        self.tgt_dict = trg_dict
        self._type = 'text'
        self.model = model
class ModelFileNotFoundException(Exception):
    """Raised when a required model file is missing on disk.

    Fixes two defects of the original:
    - it subclassed BaseException, so generic ``except Exception`` handlers
      (the conventional catch-all) would not catch it;
    - it never called the base constructor, so ``str(e)`` was empty.

    The ``message`` attribute is kept for backward compatibility with
    existing callers.
    """

    def __init__(self, path):
        message = "Model file not found: %s" % path
        super(ModelFileNotFoundException, self).__init__(message)
        self.message = message  # backward-compatible attribute
class NMTEngine:
    """Neural MT engine wrapping an OpenNMT encoder/decoder model.

    Bundles the model with its source/target dictionaries, the subword (BPE)
    text processor and the persisted Metadata, and provides translation,
    online tuning on translation-memory suggestions and checkpoint
    load/save (as ``<path>.meta`` / ``<path>.bpe`` / ``<path>.dat`` files).

    NOTE(review): this class uses Python 2 idioms (``dict.iteritems``, text
    written through a ``'wb'`` stream), so it is not Python 3 compatible as-is.
    """

    class Metadata:
        """Model hyper-parameters persisted next to a checkpoint (.meta file).

        File format: one ``key = value`` pair per line. On load, values are
        mapped back to bool/None, then to int/float where possible, otherwise
        kept as strings.
        """

        # Literal strings in the .meta file that map back to Python constants.
        __custom_values = {'True': True, 'False': False, 'None': None}

        def __init__(self):
            self.layers = 2  # Number of layers in the LSTM encoder/decoder
            self.rnn_size = 500  # Size of hidden states
            self.rnn_type = 'LSTM'  # The gate type used in the RNNs
            self.word_vec_size = 500  # Word embedding sizes
            self.input_feed = 1  # Feed the context vector at each time step as additional input to the decoder
            self.brnn = True  # Use a bidirectional encoder
            self.brnn_merge = 'sum'  # Merge action for the bidirectional hidden states: [concat|sum]
            self.context_gate = None  # Type of context gate to use [source|target|both] or None.
            self.dropout = 0.3  # Dropout probability; applied between LSTM stacks.

            # Tuning options -------------------------------------------------------------------------------------------
            self.tuning_optimizer = 'sgd'  # Optimization method. [sgd|adagrad|adadelta|adam]
            self.tuning_max_grad_norm = 5  # If norm(gradient vector) > max_grad_norm, re-normalize
            self.tuning_max_learning_rate = 0.2
            self.tuning_max_epochs = 10

        def __str__(self):
            return str(self.__dict__)

        def __repr__(self):
            return str(self.__dict__)

        def load_from_file(self, path):
            """Read ``key = value`` lines from *path*, updating only known attributes."""
            with open(path, 'rb') as metadata_stream:
                for line in metadata_stream:
                    key, value = (x.strip() for x in line.split('=', 1))

                    # Silently skip keys that are not Metadata attributes.
                    if key not in self.__dict__:
                        continue

                    if value in self.__custom_values:
                        value = self.__custom_values[value]
                    else:
                        try:
                            number = float(value)
                            value = number if '.' in value else int(value)
                        except ValueError:
                            pass  # value is a string

                    self.__dict__[key] = value

        def save_to_file(self, path):
            """Write every non-None attribute to *path* as ``key = value`` lines."""
            with open(path, 'wb') as metadata_file:
                for key, value in self.__dict__.iteritems():  # Python 2 only
                    if value is not None:
                        metadata_file.write('%s = %s\n' % (key, str(value)))

    @staticmethod
    def new_instance(src_dict, trg_dict, processor, metadata=None, init_value=0.1):
        """Build a fresh engine with weights drawn uniformly from [-init_value, init_value]."""
        if metadata is None:
            metadata = NMTEngine.Metadata()

        encoder = Models.Encoder(metadata, src_dict)
        decoder = Models.Decoder(metadata, trg_dict)
        generator = nn.Sequential(nn.Linear(metadata.rnn_size, trg_dict.size()), nn.LogSoftmax())

        model = Models.NMTModel(encoder, decoder)

        if torch_is_using_cuda():
            model.cuda()
            generator.cuda()

            if torch_is_multi_gpu():
                # DataParallel splits along dim 1 for the seq2seq model and
                # dim 0 for the generator (matches onmt's tensor layout).
                model = nn.DataParallel(model, device_ids=torch_get_gpus(), dim=1)
                generator = nn.DataParallel(generator, device_ids=torch_get_gpus(), dim=0)
        else:
            model.cpu()
            generator.cpu()

        model.generator = generator

        for p in model.parameters():
            p.data.uniform_(-init_value, init_value)

        return NMTEngine(src_dict, trg_dict, model, processor, metadata=metadata)

    @staticmethod
    def load_from_checkpoint(checkpoint_path):
        """Load an engine from ``<checkpoint_path>.meta`` / ``.bpe`` / ``.dat``.

        The .meta file is optional (defaults are used when absent); the BPE
        processor file and the tensor data file are required.

        Raises:
            ModelFileNotFoundException: if the .bpe or .dat file is missing.
        """
        metadata_file = checkpoint_path + '.meta'
        processor_file = checkpoint_path + '.bpe'
        data_file = checkpoint_path + '.dat'

        if not os.path.isfile(processor_file):
            raise ModelFileNotFoundException(processor_file)
        if not os.path.isfile(data_file):
            raise ModelFileNotFoundException(data_file)

        # Metadata (optional)
        metadata = NMTEngine.Metadata()
        if os.path.isfile(metadata_file):
            metadata.load_from_file(metadata_file)

        # Processor
        processor = SubwordTextProcessor.load_from_file(processor_file)

        # Data: deserialize to CPU first, move to GPU afterwards if available.
        checkpoint = torch.load(data_file, map_location=lambda storage, loc: storage)

        src_dict = checkpoint['dicts']['src']
        trg_dict = checkpoint['dicts']['tgt']

        encoder = Models.Encoder(metadata, src_dict)
        decoder = Models.Decoder(metadata, trg_dict)
        model = Models.NMTModel(encoder, decoder)
        model.load_state_dict(checkpoint['model'])

        generator = nn.Sequential(nn.Linear(metadata.rnn_size, trg_dict.size()), nn.LogSoftmax())
        generator.load_state_dict(checkpoint['generator'])

        if torch_is_using_cuda():
            model.cuda()
            generator.cuda()
        else:
            model.cpu()
            generator.cpu()

        model.generator = generator
        model.eval()

        return NMTEngine(src_dict, trg_dict, model, processor, metadata=metadata)

    def __init__(self, src_dict, trg_dict, model, processor, metadata=None):
        self._logger = logging.getLogger('nmmt.NMTEngine')
        self._log_level = logging.INFO
        self._model_loaded = False

        self.src_dict = src_dict
        self.trg_dict = trg_dict
        self.model = model
        self.processor = processor
        self.metadata = metadata if metadata is not None else NMTEngine.Metadata()

        self._translator = None  # lazy load
        self._tuner = None  # lazy load

        # Compute initial state, so the model can be restored after tuning
        # (see reset_model). Generator parameters are folded in under the
        # "generator." key prefix.
        model_state_dict, generator_state_dict = self._get_state_dicts()

        self._model_init_state = {k: v for k, v in sorted(model_state_dict.items()) if 'generator' not in k}
        self._model_init_state.update({"generator." + k: v for k, v in sorted(generator_state_dict.items())})

    def reset_model(self):
        """Restore the model to its initial weights and disable dropout (inference mode)."""
        with log_timed_action(self._logger, 'Restoring model initial state', log_start=False):
            self.model.load_state_dict(self._model_init_state)

            self.model.encoder.rnn.dropout = 0.
            self.model.decoder.dropout = nn.Dropout(0.)
            self.model.decoder.rnn.dropout = nn.Dropout(0.)

        self._model_loaded = True

    def _ensure_model_loaded(self):
        # Lazily reset to the initial state the first time the model is used.
        if not self._model_loaded:
            self.reset_model()

    def count_parameters(self):
        """Return the total number of scalar parameters in the model."""
        return sum([p.nelement() for p in self.model.parameters()])

    def tune(self, suggestions, epochs=None, learning_rate=None):
        """Fine-tune the model online on a small set of (source, target) suggestions.

        When *epochs* or *learning_rate* is None, the missing value is
        estimated from the average suggestion score
        (see _estimate_tuning_parameters). Tuning is skipped entirely when
        both resolve to zero.
        """
        # Set tuning parameters
        if epochs is None or learning_rate is None:
            _epochs, _learning_rate = self._estimate_tuning_parameters(suggestions)

            epochs = epochs if epochs is not None else _epochs
            learning_rate = learning_rate if learning_rate is not None else _learning_rate

        if learning_rate > 0. or epochs > 0:
            if self._tuner is None:
                # Imported lazily to avoid a circular import with the trainer module.
                from nmmt.NMTEngineTrainer import NMTEngineTrainer

                optimizer = Optim(self.metadata.tuning_optimizer, 1., max_grad_norm=self.metadata.tuning_max_grad_norm)

                tuner_opts = NMTEngineTrainer.Options()
                tuner_opts.log_level = logging.NOTSET

                self._tuner = NMTEngineTrainer(self, options=tuner_opts, optimizer=optimizer)

            self._tuner.opts.step_limit = epochs
            self._tuner.reset_learning_rate(learning_rate)

            # Process suggestions: BPE-encode, then map tokens to index tensors.
            tuning_src_batch, tuning_trg_batch = [], []

            for suggestion in suggestions:
                source = self.processor.encode_line(suggestion.source, is_source=True)
                source = self.src_dict.convertToIdxTensor(source, Constants.UNK_WORD)
                target = self.processor.encode_line(suggestion.target, is_source=False)
                target = self.trg_dict.convertToIdxTensor(target, Constants.UNK_WORD, Constants.BOS_WORD,
                                                          Constants.EOS_WORD)

                tuning_src_batch.append(source)
                tuning_trg_batch.append(target)

            tuning_set = Dataset(tuning_src_batch, tuning_trg_batch, len(tuning_src_batch), torch_is_using_cuda())
            tuning_set = DatasetWrapper(tuning_set)

            # Run tuning
            log_message = 'Tuning on %d suggestions (epochs = %d, learning_rate = %.3f )' % (
                len(suggestions), self._tuner.opts.step_limit, self._tuner.optimizer.lr)

            with log_timed_action(self._logger, log_message, log_start=False):
                self._tuner.train_model(tuning_set)

    def _estimate_tuning_parameters(self, suggestions):
        # it returns an actual learning_rate and epochs based on the quality of the suggestions
        # it is assured that at least one suggestion is provided (hence, len(suggestions) > 0)
        average_score = 0.0
        for suggestion in suggestions:
            average_score += suggestion.score
        average_score /= len(suggestions)

        # Empirically defined function to make the number of epochs dependent to the quality of the suggestions
        # epochs = max_epochs * average_score + 1
        # where max_epochs is the maximum number of epochs allowed;
        # hence epochs = max_epochs only with perfect suggestions
        # and epochs = 0, when the average_score is close to 0.0 (<1/max_epochs)
        tuning_epochs = int(self.metadata.tuning_max_epochs * average_score)

        # Empirically defined function to make the learning rate dependent to the quality of the suggestions
        # lr = max_lr * sqrt(average_score)
        # hence lr = max_lr only with perfect suggestions
        # and lr = 0, when the average_score is exactly 0.0
        tuning_learning_rate = self.metadata.tuning_max_learning_rate * math.sqrt(average_score)

        return tuning_epochs, tuning_learning_rate

    def translate(self, text, beam_size=5, max_sent_length=160, replace_unk=False, n_best=1):
        """Translate *text* and return a list of up to n_best Translation objects,
        each carrying the detokenized text and a word-level alignment."""
        self._ensure_model_loaded()

        self.model.eval()

        if self._translator is None:
            self._translator = _Translator(self.src_dict, self.trg_dict, self.model)

        # Per-call decoding options.
        self._translator.opt.replace_unk = replace_unk
        self._translator.opt.beam_size = max(beam_size, n_best)
        self._translator.opt.max_sent_length = max_sent_length
        self._translator.opt.n_best = n_best

        src_bpe_tokens = self.processor.encode_line(text, is_source=True)

        pred_batch, _, _, align_batch = self._translator.translate([src_bpe_tokens], None)

        translations = []

        for trg_bpe_tokens, bpe_alignment in zip(pred_batch[0], align_batch[0]):
            # Map subword alignment back to whole-word alignment.
            src_indexes = self.processor.get_words_indexes(src_bpe_tokens)
            trg_indexes = self.processor.get_words_indexes(trg_bpe_tokens)

            translation = Translation(text=self.processor.decode_tokens(trg_bpe_tokens),
                                      alignment=self._make_alignment(src_indexes, trg_indexes, bpe_alignment))
            translations.append(translation)

        return translations

    @staticmethod
    def _make_alignment(src_indexes, trg_indexes, bpe_alignment):
        # Translate subword-level alignment pairs to word-level index pairs,
        # deduplicating pairs that collapse onto the same words.
        if not bpe_alignment:
            return []

        return sorted(set([(src_indexes[al[0]], trg_indexes[al[1]]) for al in bpe_alignment]))

    def save(self, path, store_data=True, store_metadata=True, store_processor=True):
        """Persist the engine as ``<path>.meta`` / ``<path>.bpe`` / ``<path>.dat``."""
        if store_metadata:
            self.metadata.save_to_file(path + '.meta')

        if store_processor:
            self.processor.save_to_file(path + '.bpe')

        if store_data:
            model_state_dict, generator_state_dict = self._get_state_dicts()

            checkpoint = {
                'model': model_state_dict,
                'generator': generator_state_dict,
                'dicts': {'src': self.src_dict, 'tgt': self.trg_dict},
            }

            torch.save(checkpoint, path + '.dat')

    def _get_state_dicts(self):
        # Unwrap nn.DataParallel (if any) so the saved keys have no
        # "module." prefix; deep-copy so later training cannot mutate
        # the snapshot in place.
        is_multi_gpu = torch_is_multi_gpu()

        model = self.model.module if is_multi_gpu else self.model
        generator = self.model.generator.module if is_multi_gpu else self.model.generator

        model_state_dict = {k: v for k, v in model.state_dict().items() if 'generator' not in k}
        generator_state_dict = generator.state_dict()

        return copy.deepcopy(model_state_dict), copy.deepcopy(generator_state_dict)
| |
#!/usr/bin/env python
#
# This code was copied from the data generation program of Tencent Alchemy
# project (https://github.com/tencent-alchemy).
#
#
# #
# # Copyright 2019 Tencent America LLC. All Rights Reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# #
# # Author: Qiming Sun <osirpt.sun@gmail.com>
# #
'''
Non-relativistic UHF analytical Hessian
'''
import time
import numpy
import numpy as np
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.hessian import rhf as rhf_hess
from pyscf.hessian import uhf as uhf_hess
from pyscf.df.hessian.rhf import _int3c_wrapper, _load_dim0
def partial_hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
                      atmlst=None, max_memory=4000, verbose=None):
    """Electronic contribution to the partial nuclear Hessian.

    Combines the three pieces computed by _partial_hess_ejk as
    one-electron + Coulomb - exchange (exchange requested via with_k=True).
    """
    one_electron, coulomb, exchange = _partial_hess_ejk(
        hessobj, mo_energy, mo_coeff, mo_occ, atmlst, max_memory, verbose, True)
    return one_electron + coulomb - exchange
def _partial_hess_ejk(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
                      atmlst=None, max_memory=4000, verbose=None, with_k=True):
    '''Partial Hessian contributions (e1, ej, ek) of a density-fitted UHF
    calculation.

    Returns the one-electron (e1), Coulomb (ej) and exchange (ek) blocks of
    the nuclear Hessian, each of shape (natm, natm, 3, 3).  When with_k is
    False the exchange block is returned as zeros.  Depending on
    hessobj.auxbasis_response (0/1/2), the response of the auxiliary (RI)
    basis is ignored, included to first order, or included to second order.
    '''
    log = logger.new_logger(hessobj, verbose)
    # time.clock() was removed in Python 3.8; process_time() is its
    # replacement (CPU time), paired with wall time for log.timer.
    time0 = t1 = (time.process_time(), time.time())

    mol = hessobj.mol
    mf = hessobj.base
    if mo_energy is None: mo_energy = mf.mo_energy
    if mo_occ is None:    mo_occ = mf.mo_occ
    if mo_coeff is None:  mo_coeff = mf.mo_coeff
    if atmlst is None: atmlst = range(mol.natm)

    nao = mo_coeff[0].shape[0]
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    nocca = mocca.shape[1]
    noccb = moccb.shape[1]
    dm0a = numpy.dot(mocca, mocca.T)
    dm0b = numpy.dot(moccb, moccb.T)
    dm0 = dm0a + dm0b
    # Energy weighted density matrix
    mo_ea = mo_energy[0][mo_occ[0]>0]
    mo_eb = mo_energy[1][mo_occ[1]>0]
    dme0 = numpy.einsum('pi,qi,i->pq', mocca, mocca, mo_ea)
    dme0 += numpy.einsum('pi,qi,i->pq', moccb, moccb, mo_eb)

    auxmol = hessobj.base.with_df.auxmol
    naux = auxmol.nao
    nbas = mol.nbas
    auxslices = auxmol.aoslice_by_atom()
    aoslices = mol.aoslice_by_atom()
    aux_loc = auxmol.ao_loc
    # Auxiliary shells are processed in blocks sized to ~30% of max_memory.
    blksize = min(480, hessobj.max_memory*.3e6/8/nao**2)
    aux_ranges = ao2mo.outcore.balance_partition(auxmol.ao_loc, blksize)

    hcore_deriv = hessobj.hcore_generator(mol)
    s1aa, s1ab, s1a = rhf_hess.get_ovlp(mol)

    ftmp = lib.H5TmpFile()
    get_int3c = _int3c_wrapper(mol, auxmol, 'int3c2e', 's1')

    # Without RI basis response
    #    (20|0)(0|00)
    #    (11|0)(0|00)
    #    (10|0)(0|10)
    int2c = auxmol.intor('int2c2e', aosym='s1')
    int2c_low = scipy.linalg.cho_factor(int2c, lower=True)
    int2c_ip1 = auxmol.intor('int2c2e_ip1', aosym='s1')

    # Fitted densities: rhoj0_P for Coulomb, rhok0*_Pl_ for exchange.
    rhoj0_P = 0
    if with_k:
        if hessobj.max_memory*.4e6/8 < naux*nocca*(nocca+nao):
            raise RuntimeError('Memory not enough. You need to increase mol.max_memory')
        rhok0a_Pl_ = np.empty((naux,nao,nocca))
        rhok0b_Pl_ = np.empty((naux,nao,noccb))
    for i, (shl0, shl1, p0, p1) in enumerate(aoslices):
        int3c = get_int3c((shl0, shl1, 0, nbas, 0, auxmol.nbas))
        rhoj0_P += np.einsum('klp,kl->p', int3c, dm0[p0:p1])
        if with_k:
            tmp = lib.einsum('ijp,jk->pik', int3c, mocca)
            tmp = scipy.linalg.cho_solve(int2c_low, tmp.reshape(naux,-1), overwrite_b=True)
            rhok0a_Pl_[:,p0:p1] = tmp.reshape(naux,p1-p0,nocca)
            tmp = lib.einsum('ijp,jk->pik', int3c, moccb)
            tmp = scipy.linalg.cho_solve(int2c_low, tmp.reshape(naux,-1), overwrite_b=True)
            rhok0b_Pl_[:,p0:p1] = tmp.reshape(naux,p1-p0,noccb)
        int3c = tmp = None
    rhoj0_P = scipy.linalg.cho_solve(int2c_low, rhoj0_P)

    # Diagonal (same-atom) second-derivative terms (20|0)(0|00).
    get_int3c_ipip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipip1', 's1')
    vj1_diag = 0
    vk1a_diag = 0
    vk1b_diag = 0
    for shl0, shl1, nL in aux_ranges:
        shls_slice = (0, nbas, 0, nbas, shl0, shl1)
        p0, p1 = aux_loc[shl0], aux_loc[shl1]
        int3c_ipip1 = get_int3c_ipip1(shls_slice)
        vj1_diag += np.einsum('xijp,p->xij', int3c_ipip1, rhoj0_P[p0:p1]).reshape(3,3,nao,nao)
        if with_k:
            tmp = lib.einsum('Plj,Jj->PlJ', rhok0a_Pl_[p0:p1], mocca)
            vk1a_diag += lib.einsum('xijp,plj->xil', int3c_ipip1, tmp).reshape(3,3,nao,nao)
            tmp = lib.einsum('Plj,Jj->PlJ', rhok0b_Pl_[p0:p1], moccb)
            vk1b_diag += lib.einsum('xijp,plj->xil', int3c_ipip1, tmp).reshape(3,3,nao,nao)
    t1 = log.timer_debug1('contracting int2e_ipip1', *t1)
    int3c_ipip1 = get_int3c_ipip1 = tmp = None

    # First-derivative fitted densities, cached on disk per atom.
    get_int3c_ip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1', 's1')
    rho_ip1 = ftmp.create_dataset('rho_ip1', (nao,nao,naux,3), 'f8')
    rhoka_ip1_IkP = ftmp.create_group('rhoka_ip1_IkP')
    rhokb_ip1_IkP = ftmp.create_group('rhokb_ip1_IkP')
    rhoka_ip1_PkI = ftmp.create_group('rhoka_ip1_PkI')
    rhokb_ip1_PkI = ftmp.create_group('rhokb_ip1_PkI')
    rhoj1 = np.empty((mol.natm,naux,3))
    wj1 = np.empty((mol.natm,naux,3))
    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
        int3c_ip1 = get_int3c_ip1(shls_slice)
        tmp_ip1 = scipy.linalg.cho_solve(int2c_low, int3c_ip1.reshape(-1,naux).T,
                                         overwrite_b=True).reshape(naux,3,p1-p0,nao)
        rhoj1[i0] = np.einsum('pxij,ji->px', tmp_ip1, dm0[:,p0:p1])
        wj1[i0] = np.einsum('xijp,ji->px', int3c_ip1, dm0[:,p0:p1])
        rho_ip1[p0:p1] = tmp_ip1.transpose(2,3,0,1)
        if with_k:
            tmp = lib.einsum('pykl,li->ikpy', tmp_ip1, dm0a)
            rhoka_ip1_IkP['%.4d'%ia] = tmp
            rhoka_ip1_PkI['%.4d'%ia] = tmp.transpose(2,1,0,3)
            tmp = None
            tmp = lib.einsum('pykl,li->ikpy', tmp_ip1, dm0b)
            rhokb_ip1_IkP['%.4d'%ia] = tmp
            rhokb_ip1_PkI['%.4d'%ia] = tmp.transpose(2,1,0,3)
            tmp = None
    ej = lib.einsum('ipx,jpy->ijxy', rhoj1, wj1) * 4
    ek = np.zeros_like(ej)
    e1 = np.zeros_like(ej)
    rhoj1 = wj1 = None

    if with_k:
        vk2a_buf = 0
        vk2b_buf = 0
        for shl0, shl1, nL in aux_ranges:
            shls_slice = (0, nbas, 0, nbas, shl0, shl1)
            p0, p1 = aux_loc[shl0], aux_loc[shl1]
            int3c_ip1 = get_int3c_ip1(shls_slice)
            vk2a_buf += lib.einsum('xijp,pkjy->xyki', int3c_ip1, _load_dim0(rhoka_ip1_PkI, p0, p1))
            vk2b_buf += lib.einsum('xijp,pkjy->xyki', int3c_ip1, _load_dim0(rhokb_ip1_PkI, p0, p1))

    get_int3c_ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip2', 's1')
    wj_ip2 = np.empty((naux,3))
    wka_ip2_Ipk = ftmp.create_dataset('wka_ip2', (nao,naux,3,nao), 'f8')
    wkb_ip2_Ipk = ftmp.create_dataset('wkb_ip2', (nao,naux,3,nao), 'f8')
    if hessobj.auxbasis_response > 1:
        wka_ip2_P__ = np.empty((naux,3,nocca,nocca))
        wkb_ip2_P__ = np.empty((naux,3,noccb,noccb))
    for shl0, shl1, nL in aux_ranges:
        shls_slice = (0, nbas, 0, nbas, shl0, shl1)
        p0, p1 = aux_loc[shl0], aux_loc[shl1]
        int3c_ip2 = get_int3c_ip2(shls_slice)
        wj_ip2[p0:p1] = np.einsum('yklp,lk->py', int3c_ip2, dm0)
        if with_k:
            wka_ip2_Ipk[:,p0:p1] = lib.einsum('yklp,il->ipyk', int3c_ip2, dm0a)
            wkb_ip2_Ipk[:,p0:p1] = lib.einsum('yklp,il->ipyk', int3c_ip2, dm0b)
            if hessobj.auxbasis_response > 1:
                wka_ip2_P__[p0:p1] = lib.einsum('xuvp,ui,vj->pxij', int3c_ip2, mocca, mocca)
                wkb_ip2_P__[p0:p1] = lib.einsum('xuvp,ui,vj->pxij', int3c_ip2, moccb, moccb)
        int3c_ip2 = None

    if hessobj.auxbasis_response > 1:
        # Quantities that only enter the second-order RI-basis response.
        get_int3c_ipip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipip2', 's1')
        rhok0a_P__ = lib.einsum('plj,li->pij', rhok0a_Pl_, mocca)
        rhok0b_P__ = lib.einsum('plj,li->pij', rhok0b_Pl_, moccb)
        rho2c_0 = lib.einsum('pij,qij->pq', rhok0a_P__, rhok0a_P__)
        rho2c_0 += lib.einsum('pij,qij->pq', rhok0b_P__, rhok0b_P__)
        int2c_inv = np.linalg.inv(int2c)
        int2c_ipip1 = auxmol.intor('int2c2e_ipip1', aosym='s1')
        int2c_ip_ip = lib.einsum('xpq,qr,ysr->xyps', int2c_ip1, int2c_inv, int2c_ip1)
        int2c_ip_ip -= auxmol.intor('int2c2e_ip1ip2', aosym='s1').reshape(3,3,naux,naux)
    int2c = int2c_low = None

    get_int3c_ipvip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ipvip1', 's1')
    get_int3c_ip1ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1ip2', 's1')

    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
        # (10|0)(0|10) without response of RI basis
        if with_k:
            int3c_ip1 = get_int3c_ip1(shls_slice)
            vk1a = lib.einsum('xijp,ikpy->xykj', int3c_ip1, _load_dim0(rhoka_ip1_IkP, p0, p1))
            vk1b = lib.einsum('xijp,ikpy->xykj', int3c_ip1, _load_dim0(rhokb_ip1_IkP, p0, p1))
            vk1a[:,:,:,p0:p1] += vk2a_buf[:,:,:,p0:p1]
            vk1b[:,:,:,p0:p1] += vk2b_buf[:,:,:,p0:p1]
        t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)
        int3c_ip1 = None

        # (11|0)(0|00) without response of RI basis
        int3c_ipvip1 = get_int3c_ipvip1(shls_slice)
        vj1 = np.einsum('xijp,p->xji', int3c_ipvip1, rhoj0_P).reshape(3,3,nao,p1-p0)
        if with_k:
            tmp = lib.einsum('pki,ji->pkj', rhok0a_Pl_, mocca[p0:p1])
            vk1a += lib.einsum('xijp,pki->xjk', int3c_ipvip1, tmp).reshape(3,3,nao,nao)
            tmp = lib.einsum('pki,ji->pkj', rhok0b_Pl_, moccb[p0:p1])
            vk1b += lib.einsum('xijp,pki->xjk', int3c_ipvip1, tmp).reshape(3,3,nao,nao)
        t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)
        int3c_ipvip1 = tmp = None

        s1ao = numpy.zeros((3,nao,nao))
        s1ao[:,p0:p1] += s1a[:,p0:p1]
        s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)

        # Same-atom (diagonal) contributions.
        e1[i0,i0] -= numpy.einsum('xypq,pq->xy', s1aa[:,:,p0:p1], dme0[p0:p1])*2
        ej[i0,i0] += numpy.einsum('xypq,pq->xy', vj1_diag[:,:,p0:p1], dm0[p0:p1])*2
        if with_k:
            ek[i0,i0] += numpy.einsum('xypq,pq->xy', vk1a_diag[:,:,p0:p1], dm0a[p0:p1])*2
            ek[i0,i0] += numpy.einsum('xypq,pq->xy', vk1b_diag[:,:,p0:p1], dm0b[p0:p1])*2

        # Lower-triangle atom pairs; the upper triangle is filled by
        # symmetry at the end.
        for j0, ja in enumerate(atmlst[:i0+1]):
            q0, q1 = aoslices[ja][2:]
            ej[i0,j0] += numpy.einsum('xypq,pq->xy', vj1[:,:,q0:q1], dm0[q0:q1,p0:p1])*2
            e1[i0,j0] -= numpy.einsum('xypq,pq->xy', s1ab[:,:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2
            if with_k:
                ek[i0,j0] += numpy.einsum('xypq,pq->xy', vk1a[:,:,q0:q1], dm0a[q0:q1])*2
                ek[i0,j0] += numpy.einsum('xypq,pq->xy', vk1b[:,:,q0:q1], dm0b[q0:q1])*2
            h1ao = hcore_deriv(ia, ja)
            e1[i0,j0] += numpy.einsum('xypq,pq->xy', h1ao, dm0)

        #
        # The first order RI basis response
        #    (10|1)(0|00)
        #    (10|0)(1|0)(0|00)
        #    (10|0)(0|1)(0|00)
        #    (10|0)(1|00)
        #
        if hessobj.auxbasis_response:
            wk1_Pij = rho_ip1[p0:p1].transpose(2,3,0,1)
            rhoj1_P = np.einsum('pxij,ji->px', wk1_Pij, dm0[:,p0:p1])
            # (10|1)(0|0)(0|00)
            int3c_ip1ip2 = get_int3c_ip1ip2(shls_slice)
            wj11_p = np.einsum('xijp,ji->xp', int3c_ip1ip2, dm0[:,p0:p1])
            # (10|0)(1|0)(0|00)
            wj0_01 = np.einsum('ypq,q->yp', int2c_ip1, rhoj0_P)
            if with_k:
                rhok0_P_I = lib.einsum('plj,il->pji', rhok0a_Pl_, dm0a[p0:p1])
                rhok0_PJI = lib.einsum('pji,Jj->pJi', rhok0_P_I, mocca)
                rhok0_P_I = lib.einsum('plj,il->pji', rhok0b_Pl_, dm0b[p0:p1])
                rhok0_PJI += lib.einsum('pji,Jj->pJi', rhok0_P_I, moccb)
                wk1_pJI = lib.einsum('ypq,qji->ypji', int2c_ip1, rhok0_PJI)
                wk1_IpJ = lib.einsum('ipyk,kj->ipyj', wka_ip2_Ipk[p0:p1], dm0a)
                wk1_IpJ += lib.einsum('ipyk,kj->ipyj', wkb_ip2_Ipk[p0:p1], dm0b)
                rho2c_PQ = lib.einsum('pxij,qji->xqp', wk1_Pij, rhok0_PJI)
            for j0, (q0, q1) in enumerate(auxslices[:,2:]):
                # (10|1)(0|00)
                _ej = np.einsum('xp,p->x', wj11_p[:,q0:q1], rhoj0_P[q0:q1]).reshape(3,3)
                # (10|0)(0|1)(0|00)
                _ej -= lib.einsum('yqp,q,px->xy', int2c_ip1[:,q0:q1], rhoj0_P[q0:q1], rhoj1_P)
                # (10|0)(1|0)(0|00)
                _ej -= lib.einsum('px,yp->xy', rhoj1_P[q0:q1], wj0_01[:,q0:q1])
                # (10|0)(1|00)
                _ej += lib.einsum('px,py->xy', rhoj1_P[q0:q1], wj_ip2[q0:q1])
                if hessobj.auxbasis_response > 1:
                    ej[i0,j0] += _ej * 2
                    ej[j0,i0] += _ej.T * 2
                else:
                    ej[i0,j0] += _ej
                    ej[j0,i0] += _ej.T
                if with_k:
                    _ek = lib.einsum('xijp,pji->x', int3c_ip1ip2[:,:,:,q0:q1],
                                     rhok0_PJI[q0:q1]).reshape(3,3)
                    _ek -= lib.einsum('pxij,ypji->xy', wk1_Pij[q0:q1], wk1_pJI[:,q0:q1])
                    _ek -= lib.einsum('xqp,yqp->xy', rho2c_PQ[:,q0:q1], int2c_ip1[:,q0:q1])
                    _ek += lib.einsum('pxij,ipyj->xy', wk1_Pij[q0:q1], wk1_IpJ[:,q0:q1])
                    if hessobj.auxbasis_response > 1:
                        ek[i0,j0] += _ek * 2
                        ek[j0,i0] += _ek.T * 2
                    else:
                        ek[i0,j0] += _ek
                        ek[j0,i0] += _ek.T
            int3c_ip1ip2 = None

        #
        # The second order RI basis response
        #
        if hessobj.auxbasis_response > 1:
            # (00|2)(0|00)
            # (00|0)(2|0)(0|00)
            shl0, shl1, p0, p1 = auxslices[ia]
            shls_slice = (0, nbas, 0, nbas, shl0, shl1)
            int3c_ipip2 = get_int3c_ipip2(shls_slice)
            ej[i0,i0] += np.einsum('xijp,ji,p->x', int3c_ipip2, dm0, rhoj0_P[p0:p1]).reshape(3,3)
            ej[i0,i0] -= np.einsum('p,xpq,q->x', rhoj0_P[p0:p1], int2c_ipip1[:,p0:p1], rhoj0_P).reshape(3,3)
            if with_k:
                rhok0_PJI = lib.einsum('Pij,Jj,Ii->PJI', rhok0a_P__[p0:p1], mocca, mocca)
                rhok0_PJI += lib.einsum('Pij,Jj,Ii->PJI', rhok0b_P__[p0:p1], moccb, moccb)
                ek[i0,i0] += np.einsum('xijp,pij->x', int3c_ipip2, rhok0_PJI).reshape(3,3)
                ek[i0,i0] -= np.einsum('pq,xpq->x', rho2c_0[p0:p1], int2c_ipip1[:,p0:p1]).reshape(3,3)
                rhok0_PJI = None

            # (00|0)(1|1)(0|00)
            # (00|1)(1|0)(0|00)
            # (00|1)(0|1)(0|00)
            # (00|1)(1|00)
            rhoj1 = lib.einsum('px,pq->xq', wj_ip2[p0:p1], int2c_inv[p0:p1])
            # (00|0)(0|1)(1|0)(0|00)
            rhoj0_01 = lib.einsum('xp,pq->xq', wj0_01[:,p0:p1], int2c_inv[p0:p1])
            # (00|0)(1|0)(1|0)(0|00)
            ip1_2c_2c = lib.einsum('xpq,qr->xpr', int2c_ip1[:,p0:p1], int2c_inv)
            rhoj0_10 = lib.einsum('p,xpq->xq', rhoj0_P[p0:p1], ip1_2c_2c)
            if with_k:
                # (00|0)(0|1)(1|0)(0|00)
                ip1_rho2c = .5 * lib.einsum('xpq,qr->xpr', int2c_ip1[:,p0:p1], rho2c_0)
                rho2c_1 = lib.einsum('xrq,rp->xpq', ip1_rho2c, int2c_inv[p0:p1])
                # (00|0)(1|0)(1|0)(0|00)
                rho2c_1 += lib.einsum('xrp,rq->xpq', ip1_2c_2c, rho2c_0[p0:p1])
                # (00|1)(0|1)(0|00)
                # (00|1)(1|0)(0|00)
                int3c_ip2 = get_int3c_ip2(shls_slice)
                tmp = lib.einsum('xuvr,vj,ui,qij,rp->xpq', int3c_ip2,
                                 mocca, mocca, rhok0a_P__, int2c_inv[p0:p1])
                tmp += lib.einsum('xuvr,vj,ui,qij,rp->xpq', int3c_ip2,
                                  moccb, moccb, rhok0b_P__, int2c_inv[p0:p1])
                rho2c_1 -= tmp
                rho2c_1 -= tmp.transpose(0,2,1)
                int3c_ip2 = tmp = None

            for j0, (q0, q1) in enumerate(auxslices[:,2:]):
                _ej = 0
                # (00|0)(1|1)(0|00)
                # (00|0)(1|0)(0|1)(0|00)
                _ej += .5 * np.einsum('p,xypq,q->xy', rhoj0_P[p0:p1], int2c_ip_ip[:,:,p0:p1,q0:q1], rhoj0_P[q0:q1])
                # (00|1)(1|0)(0|00)
                _ej -= lib.einsum('xp,yp->xy', rhoj1[:,q0:q1], wj0_01[:,q0:q1])
                # (00|1)(1|00)
                _ej += .5 * lib.einsum('xp,py->xy', rhoj1[:,q0:q1], wj_ip2[q0:q1])
                # (00|0)(0|1)(1|0)(0|00)
                _ej += .5 * np.einsum('xp,yp->xy', rhoj0_01[:,q0:q1], wj0_01[:,q0:q1])
                # (00|1)(0|1)(0|00)
                _ej -= lib.einsum('yqp,q,xp->xy', int2c_ip1[:,q0:q1], rhoj0_P[q0:q1], rhoj1)
                # (00|0)(1|0)(1|0)(0|00)
                _ej += np.einsum('xp,yp->xy', rhoj0_10[:,q0:q1], wj0_01[:,q0:q1])
                ej[i0,j0] += _ej
                ej[j0,i0] += _ej.T
                if with_k:
                    # (00|0)(1|1)(0|00)
                    # (00|0)(1|0)(0|1)(0|00)
                    _ek = .5 * np.einsum('pq,xypq->xy', rho2c_0[p0:p1,q0:q1], int2c_ip_ip[:,:,p0:p1,q0:q1])
                    # (00|1)(0|1)(0|00)
                    # (00|1)(1|0)(0|00)
                    # (00|0)(0|1)(1|0)(0|00)
                    # (00|0)(1|0)(1|0)(0|00)
                    _ek += np.einsum('xpq,ypq->xy', rho2c_1[:,q0:q1], int2c_ip1[:,q0:q1])
                    # (00|1)(1|00)
                    _ek += .5 * lib.einsum('pxij,pq,qyij->xy', wka_ip2_P__[p0:p1],
                                           int2c_inv[p0:p1,q0:q1], wka_ip2_P__[q0:q1])
                    _ek += .5 * lib.einsum('pxij,pq,qyij->xy', wkb_ip2_P__[p0:p1],
                                           int2c_inv[p0:p1,q0:q1], wkb_ip2_P__[q0:q1])
                    ek[i0,j0] += _ek
                    ek[j0,i0] += _ek.T

    # Fill the upper triangle by symmetry of the Hessian.
    for i0, ia in enumerate(atmlst):
        for j0 in range(i0):
            e1[j0,i0] = e1[i0,j0].T
            ej[j0,i0] = ej[i0,j0].T
            ek[j0,i0] = ek[i0,j0].T

    log.timer('UHF partial hessian', *time0)
    return e1, ej, ek
def make_h1(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=None):
    '''First order Fock matrices in AO basis for each atom in atmlst.

    For each atom, h1 = hcore derivative + vj1 - vk1 (per spin).  When
    chkfile is None, a pair of lists (alpha, beta) indexed by atom id is
    returned; otherwise results are stored under 'scf_f1ao/0' and
    'scf_f1ao/1' in chkfile and the chkfile path is returned.

    Note: the original implementation also built mocca/moccb, dm0a/dm0b,
    an hcore generator and a time.clock() stamp (removed in Python 3.8)
    that were never used; they have been dropped.
    '''
    mol = hessobj.mol
    if atmlst is None:
        atmlst = range(mol.natm)

    h1aoa = [None] * mol.natm
    h1aob = [None] * mol.natm
    for ia, h1, vj1, vk1a, vk1b in _gen_jk(hessobj, mo_coeff, mo_occ, chkfile,
                                           atmlst, verbose, True):
        h1a = h1 + vj1 - vk1a
        h1b = h1 + vj1 - vk1b
        if chkfile is None:
            h1aoa[ia] = h1a
            h1aob[ia] = h1b
        else:
            lib.chkfile.save(chkfile, 'scf_f1ao/0/%d' % ia, h1a)
            lib.chkfile.save(chkfile, 'scf_f1ao/1/%d' % ia, h1b)
    if chkfile is None:
        return (h1aoa, h1aob)
    else:
        return chkfile
def _gen_jk(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None,
            verbose=None, with_k=True):
    '''Generator yielding (ia, h1, vj1, vk1a, vk1b) per atom: the hcore
    derivative plus the first-order Coulomb/exchange responses of the
    density-fitted UHF Fock matrix, including the auxiliary-basis response
    when hessobj.auxbasis_response is set.

    Fixes vs. the original: the unused time.clock() stamp is gone
    (time.clock was removed in Python 3.8), and rhok0_PlJ is no longer
    re-sliced with absolute aux indices after already being built from the
    [p0:p1] block (which mis-aligned any aux block with p0 > 0).
    '''
    mol = hessobj.mol
    if atmlst is None:
        atmlst = range(mol.natm)

    auxmol = hessobj.base.with_df.auxmol
    nbas = mol.nbas
    auxslices = auxmol.aoslice_by_atom()
    aux_loc = auxmol.ao_loc
    nao = mo_coeff[0].shape[0]
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    nocca = mocca.shape[1]
    noccb = moccb.shape[1]
    dm0a = numpy.dot(mocca, mocca.T)
    dm0b = numpy.dot(moccb, moccb.T)
    dm0 = dm0a + dm0b

    hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)

    get_int3c = _int3c_wrapper(mol, auxmol, 'int3c2e', 's1')
    aoslices = mol.aoslice_by_atom()
    naux = auxmol.nao
    ftmp = lib.H5TmpFile()
    rho0_Pij = ftmp.create_group('rho0_Pij')
    wj_Pij = ftmp.create_group('wj_Pij')

    int2c = auxmol.intor('int2c2e', aosym='s1')
    int2c_low = scipy.linalg.cho_factor(int2c, lower=True)
    int2c_ip1 = auxmol.intor('int2c2e_ip1', aosym='s1')

    # Fit the zeroth-order densities per AO-shell block of each atom.
    rhoj0_P = 0
    if with_k:
        rhok0a_Pl_ = np.empty((naux,nao,nocca))
        rhok0b_Pl_ = np.empty((naux,nao,noccb))
    for i, (shl0, shl1, p0, p1) in enumerate(aoslices):
        int3c = get_int3c((shl0, shl1, 0, nbas, 0, auxmol.nbas))
        coef3c = scipy.linalg.cho_solve(int2c_low, int3c.reshape(-1,naux).T, overwrite_b=True)
        rho0_Pij['%.4d'%i] = coef3c = coef3c.reshape(naux,p1-p0,nao)
        rhoj0_P += np.einsum('pkl,kl->p', coef3c, dm0[p0:p1])
        if with_k:
            rhok0a_Pl_[:,p0:p1] = lib.einsum('pij,jk->pik', coef3c, mocca)
            rhok0b_Pl_[:,p0:p1] = lib.einsum('pij,jk->pik', coef3c, moccb)
        if hessobj.auxbasis_response:
            wj_Pij['%.4d'%i] = lib.einsum('xqp,pij->qixj', int2c_ip1, coef3c)
        int3c = coef3c = None

    get_int3c_ip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1', 's1')
    get_int3c_ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip2', 's1')
    aux_ranges = ao2mo.outcore.balance_partition(auxmol.ao_loc, 480)
    vk1a_buf = np.zeros((3,nao,nao))
    vk1b_buf = np.zeros((3,nao,nao))
    vj1_buf = np.zeros((mol.natm,3,nao,nao))
    for shl0, shl1, nL in aux_ranges:
        shls_slice = (0, nbas, 0, nbas, shl0, shl1)
        p0, p1 = aux_loc[shl0], aux_loc[shl1]
        int3c_ip1 = get_int3c_ip1(shls_slice)
        coef3c = _load_dim0(rho0_Pij, p0, p1)
        for i, (_, _, q0, q1) in enumerate(aoslices):
            wj1 = np.einsum('xijp,ji->xp', int3c_ip1[:,q0:q1], dm0[:,q0:q1])
            vj1_buf[i] += np.einsum('xp,pij->xij', wj1, coef3c)
        # rhok0_PlJ below is already the [p0:p1] aux block; do NOT slice it
        # again with the absolute indices p0:p1 (bug in the original).
        rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_[p0:p1], mocca)
        vk1a_buf += lib.einsum('xijp,plj->xil', int3c_ip1, rhok0_PlJ)
        rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_[p0:p1], moccb)
        vk1b_buf += lib.einsum('xijp,plj->xil', int3c_ip1, rhok0_PlJ)
        int3c_ip1 = None
    vj1_buf = ftmp['vj1_buf'] = vj1_buf

    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1, 0, nbas, 0, auxmol.nbas)
        int3c_ip1 = get_int3c_ip1(shls_slice)
        vj1 = -np.asarray(vj1_buf[ia])
        rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_, mocca[p0:p1])
        vk1a = -lib.einsum('xijp,pki->xkj', int3c_ip1, rhok0_PlJ)
        rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_, moccb[p0:p1])
        vk1b = -lib.einsum('xijp,pki->xkj', int3c_ip1, rhok0_PlJ)
        vj1[:,p0:p1] -= np.einsum('xijp,p->xij', int3c_ip1, rhoj0_P)
        vk1a[:,p0:p1] -= vk1a_buf[:,p0:p1]
        vk1b[:,p0:p1] -= vk1b_buf[:,p0:p1]

        if hessobj.auxbasis_response:
            # Response of the auxiliary functions centered on atom ia.
            shl0, shl1, q0, q1 = auxslices[ia]
            shls_slice = (0, nbas, 0, nbas, shl0, shl1)
            int3c_ip2 = get_int3c_ip2(shls_slice)
            rhoj1 = np.einsum('xijp,ji->xp', int3c_ip2, dm0)
            coef3c = _load_dim0(rho0_Pij, q0, q1)
            Pij = _load_dim0(wj_Pij, q0, q1)
            vj1 += .5 * np.einsum('pij,xp->xij', coef3c, -rhoj1)
            vj1 += .5 * np.einsum('xijp,p->xij', int3c_ip2, -rhoj0_P[q0:q1])
            vj1 -= .5 * lib.einsum('xpq,q,pij->xij', int2c_ip1[:,q0:q1], -rhoj0_P, coef3c)
            vj1 -= .5 * lib.einsum('pixj,p->xij', Pij, -rhoj0_P[q0:q1])
            rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0a_Pl_[q0:q1], mocca)
            vk1a -= lib.einsum('plj,xijp->xil', rhok0_PlJ, int3c_ip2)
            vk1a += lib.einsum('pjxi,plj->xil', Pij, rhok0_PlJ)
            rhok0_PlJ = lib.einsum('plj,Jj->plJ', rhok0b_Pl_[q0:q1], moccb)
            vk1b -= lib.einsum('plj,xijp->xil', rhok0_PlJ, int3c_ip2)
            vk1b += lib.einsum('pjxi,plj->xil', Pij, rhok0_PlJ)

        # Symmetrize the AO responses before handing them to the caller.
        vj1 = vj1 + vj1.transpose(0,2,1)
        vk1a = vk1a + vk1a.transpose(0,2,1)
        vk1b = vk1b + vk1b.transpose(0,2,1)
        h1 = hcore_deriv(ia)
        yield ia, h1, vj1, vk1a, vk1b
class Hessian(uhf_hess.Hessian):
    '''Non-relativistic UHF hessian'''
    def __init__(self, mf):
        # Level of auxiliary (density-fitting) basis response included in the
        # Hessian: 1 = first-order response (default); 2 adds the
        # second-order terms (see _partial_hess_ejk).
        self.auxbasis_response = 1
        uhf_hess.Hessian.__init__(self, mf)
    # Override the parent's methods with the density-fitted implementations
    # defined at module level.
    partial_hess_elec = partial_hess_elec
    make_h1 = make_h1
#TODO: Insert into DF class

if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        [1 , (1. ,  0.     , 0.000)],
        [1 , (0. ,  1.     , 0.000)],
        [1 , (0. , -1.517  , 1.177)],
        [1 , (0. ,  1.517  , 1.177)],
    ]
    mol.basis = '631g'
    mol.spin = 2
    mol.unit = 'B'
    mol.build()

    mf = scf.UHF(mol).density_fit()
    mf.conv_tol = 1e-14
    mf.scf()

    hobj = Hessian(mf)
    e2 = hobj.kernel()
    # Reference: conventional (non-density-fitted) UHF Hessian.
    ref = scf.UHF(mol).run().Hessian().kernel()
    print(abs(e2-ref).max())
    print(lib.finger(e2) - -0.23856667321975722)

    e2 = hobj.set(auxbasis_response=2).kernel()
    print(abs(e2-ref).max())
    # Fixed: print the deviation from the reference fingerprint, matching
    # the check above (the original printed two separate values here).
    print(lib.finger(e2) - -0.72321237584876141)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Connection sub-commands"""
import io
import json
import os
import sys
from typing import Any, Dict, List
from urllib.parse import urlparse, urlunparse
from sqlalchemy.orm import exc
from airflow.cli.simple_table import AirflowConsole
from airflow.exceptions import AirflowNotFoundException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.secrets.local_filesystem import _create_connection, load_connections_dict
from airflow.utils import cli as cli_utils, yaml
from airflow.utils.cli import suppress_logs_and_warning
from airflow.utils.session import create_session
def _connection_mapper(conn: Connection) -> Dict[str, Any]:
    """Serialize a Connection ORM object into a plain dict for console output."""
    return {
        'id': conn.id,
        'conn_id': conn.conn_id,
        'conn_type': conn.conn_type,
        'description': conn.description,
        'host': conn.host,
        'schema': conn.schema,
        'login': conn.login,
        'password': conn.password,
        'port': conn.port,
        'is_encrypted': conn.is_encrypted,
        # Bug fix: this previously duplicated conn.is_encrypted, so the
        # encryption status of the extra field was reported incorrectly.
        'is_extra_encrypted': conn.is_extra_encrypted,
        'extra_dejson': conn.extra_dejson,
        'get_uri': conn.get_uri(),
    }
@suppress_logs_and_warning
def connections_get(args):
    """Get a connection."""
    try:
        conn = BaseHook.get_connection(args.conn_id)
    except AirflowNotFoundException:
        raise SystemExit("Connection not found.")
    console = AirflowConsole()
    console.print_as(data=[conn], output=args.output, mapper=_connection_mapper)
@suppress_logs_and_warning
def connections_list(args):
    """Lists all connections at the command line"""
    with create_session() as session:
        query = session.query(Connection)
        if args.conn_id:
            # Narrow the listing to a single connection id when requested.
            query = query.filter(Connection.conn_id == args.conn_id)
        rows = query.all()
        console = AirflowConsole()
        console.print_as(data=rows, output=args.output, mapper=_connection_mapper)
def _format_connections(conns: List[Connection], fmt: str) -> str:
    """Render connections as .env, .yaml or .json text (plain JSON fallback)."""
    if fmt == '.env':
        # One CONN_ID=URI line per connection.
        env_lines = [f"{conn.conn_id}={conn.get_uri()}\n" for conn in conns]
        return "".join(env_lines)

    connections_dict = {
        conn.conn_id: {
            'conn_type': conn.conn_type,
            'description': conn.description,
            'host': conn.host,
            'login': conn.login,
            'password': conn.password,
            'schema': conn.schema,
            'port': conn.port,
            'extra': conn.extra,
        }
        for conn in conns
    }
    if fmt == '.yaml':
        return yaml.dump(connections_dict)
    if fmt == '.json':
        return json.dumps(connections_dict, indent=2)
    return json.dumps(connections_dict)
def _is_stdout(fileio: io.TextIOWrapper) -> bool:
return fileio.name == '<stdout>'
def _valid_uri(uri: str) -> bool:
"""Check if a URI is valid, by checking if both scheme and netloc are available"""
uri_parts = urlparse(uri)
return uri_parts.scheme != '' and uri_parts.netloc != ''
def connections_export(args):
    """Exports all connections to a file"""
    allowed_formats = ['.yaml', '.json', '.env']
    provided_format = None if args.format is None else f".{args.format.lower()}"
    default_format = provided_format or '.json'
    with create_session() as session:
        # Decide the output format: explicit --format wins; otherwise infer
        # from the target file extension; stdout falls back to JSON.
        if _is_stdout(args.file):
            filetype = default_format
        elif provided_format is not None:
            filetype = provided_format
        else:
            filetype = os.path.splitext(args.file.name)[1].lower()
            if filetype not in allowed_formats:
                raise SystemExit(
                    f"Unsupported file format. The file must have "
                    f"the extension {', '.join(allowed_formats)}."
                )
        connections = session.query(Connection).order_by(Connection.conn_id).all()
        args.file.write(_format_connections(connections, filetype))
        if _is_stdout(args.file):
            # Status goes to stderr so it doesn't pollute the exported data.
            print("Connections successfully exported.", file=sys.stderr)
        else:
            print(f"Connections successfully exported to {args.file.name}.")
# CLI arguments that describe individual connection fields; they conflict
# with --conn-uri, which already encodes all of them (see connections_add).
alternative_conn_specs = ['conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']
@cli_utils.action_logging
def connections_add(args):
    """Adds new connection"""
    # Validate the argument combination: either a full --conn-uri (which is
    # incompatible with the per-field flags) or at least --conn-type.
    missing_args = []
    invalid_args = []
    if args.conn_uri:
        if not _valid_uri(args.conn_uri):
            raise SystemExit(f'The URI provided to --conn-uri is invalid: {args.conn_uri}')
        invalid_args.extend(
            spec for spec in alternative_conn_specs if getattr(args, spec) is not None
        )
    elif not args.conn_type:
        missing_args.append('conn-uri or conn-type')
    if missing_args:
        raise SystemExit(f'The following args are required to add a connection: {missing_args!r}')
    if invalid_args:
        raise SystemExit(
            f'The following args are not compatible with the '
            f'add flag and --conn-uri flag: {invalid_args!r}'
        )

    if args.conn_uri:
        new_conn = Connection(conn_id=args.conn_id, description=args.conn_description, uri=args.conn_uri)
    else:
        new_conn = Connection(
            conn_id=args.conn_id,
            conn_type=args.conn_type,
            description=args.conn_description,
            host=args.conn_host,
            login=args.conn_login,
            password=args.conn_password,
            schema=args.conn_schema,
            port=args.conn_port,
        )
    if args.conn_extra is not None:
        new_conn.set_extra(args.conn_extra)

    with create_session() as session:
        already_there = (
            session.query(Connection).filter(Connection.conn_id == new_conn.conn_id).first()
        )
        if already_there:
            raise SystemExit(f'A connection with `conn_id`={new_conn.conn_id} already exists.')
        session.add(new_conn)
        if args.conn_uri:
            shown_uri = args.conn_uri
        else:
            # Rebuild a display URI from the individual fields, masking the
            # password.
            netloc = '{}:{}@{}:{}'.format(
                args.conn_login or '',
                '******' if args.conn_password else '',
                args.conn_host or '',
                args.conn_port or '',
            )
            shown_uri = urlunparse((args.conn_type, netloc, args.conn_schema or '', '', '', ''))
        print(f'Successfully added `conn_id`={new_conn.conn_id} : {shown_uri}')
@cli_utils.action_logging
def connections_delete(args):
    """Deletes connection from DB"""
    with create_session() as session:
        try:
            # .one() enforces exactly one match; both failure modes get a
            # dedicated error message below.
            target = session.query(Connection).filter(Connection.conn_id == args.conn_id).one()
        except exc.NoResultFound:
            raise SystemExit(f'Did not find a connection with `conn_id`={args.conn_id}')
        except exc.MultipleResultsFound:
            raise SystemExit(f'Found more than one connection with `conn_id`={args.conn_id}')
        session.delete(target)
        print(f"Successfully deleted connection with `conn_id`={target.conn_id}")
@cli_utils.action_logging
def connections_import(args):
    """Imports connections from a given file"""
    if not os.path.exists(args.file):
        raise SystemExit("Missing connections file.")
    _import_helper(args.file)
def _import_helper(file_path):
    """Helps import connections from a file.

    Existing connections (matched by conn_id) are skipped with a message;
    each new connection is committed individually so one failure does not
    roll back earlier imports.
    """
    # Loop-invariant: only these keys are passed through to the Connection
    # constructor (was rebuilt as a list on every iteration; a set also
    # gives O(1) membership checks).
    allowed_fields = {
        'extra',
        'description',
        'conn_id',
        'login',
        'conn_type',
        'host',
        'password',
        'schema',
        'port',
        'uri',
        'extra_dejson',
    }
    connections_dict = load_connections_dict(file_path)
    with create_session() as session:
        for conn_id, conn_values in connections_dict.items():
            if session.query(Connection).filter(Connection.conn_id == conn_id).first():
                print(f'Could not import connection {conn_id}: connection already exists.')
                continue
            filtered_connection_values = {
                key: value for key, value in conn_values.items() if key in allowed_fields
            }
            connection = _create_connection(conn_id, filtered_connection_values)
            session.add(connection)
            session.commit()
            print(f'Imported connection {conn_id}')
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from os.path import abspath, dirname
class Migration(SchemaMigration):
    """Schema migration 0111.

    The actual DDL lives in raw SQL files shipped next to this module
    (``migration_0111_up.sql`` / ``migration_0111_down.sql``); forwards and
    backwards simply execute the matching file.
    """

    def _execute_sql_file(self, filename):
        """Read *filename* (relative to this module's directory) and run it."""
        sql_path = "{}/{}".format(abspath(dirname(__file__)), filename)
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked the handle returned by open()).
        with open(sql_path) as sql_file:
            db.execute(sql_file.read())

    def forwards(self, orm):
        self._execute_sql_file("migration_0111_up.sql")

    def backwards(self, orm):
        self._execute_sql_file("migration_0111_down.sql")

    models = {
        u'account.organization': {
            'Meta': {'object_name': 'Organization'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'account.team': {
            'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
            'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'physical.cloud': {
            'Meta': {'object_name': 'Cloud'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'pool': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'infra'", 'null': 'True', 'to': u"orm['physical.Pool']"}),
            'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ssl_mode': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.databaseinfraparameter': {
            'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
            'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
            'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'physical.enginepatch': {
            'Meta': {'object_name': 'EnginePatch'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.enginetype': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
            'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provisioner': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environmentgroup': {
            'Meta': {'object_name': 'EnvironmentGroup'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
            'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
            'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'physical.offering': {
            'Meta': {'object_name': 'Offering'},
            'cpus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.parameter': {
            'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
            'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'persistense_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_persisted_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.pool': {
            'Meta': {'object_name': 'Pool'},
            'cluster_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'cluster_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'cluster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dbaas_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'pools'", 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'project_id': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
            'rancher_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'rancher_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
            'storageclass': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'pools'", 'symmetrical': 'False', 'to': u"orm['account.Team']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'vpc': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
            'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.script': {
            'Meta': {'object_name': 'Script'},
            'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'configure_log': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.topologyparametercustomvalue': {
            'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
            'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
            'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.vip': {
            'Meta': {'object_name': 'Vip'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
            'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.volume': {
            'Meta': {'object_name': 'Volume'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['physical']
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic type variable for the deserialized return value of an operation.
T = TypeVar('T')
# Optional per-request callback hook: receives the raw pipeline response, the
# deserialized object, and a dict (presumably response headers, per azure-core
# convention — confirm against the SDK docs); its return value replaces the result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
    """RouteTablesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers
        injected by the generated service client."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _delete_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the delete long-running operation.

        Returns ``None`` (or ``cls(...)`` if a response callback was supplied);
        raises :class:`~azure.core.exceptions.HttpResponseError` for any status
        other than 200/202/204.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings override/extend the defaults.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        route_table_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. The cls lambda
            # makes the initial call hand back the raw pipeline response so the
            # poller can read its status/headers.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial request; must not leak into the poller kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body: only invoke a caller-supplied callback.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        route_table_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.RouteTable":
        """Gets the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteTable, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_07_01.models.RouteTable
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.RouteTable",
        **kwargs: Any
    ) -> "_models.RouteTable":
        """Send the initial PUT request of the create-or-update long-running
        operation and deserialize its 200/201 body into a ``RouteTable``."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'RouteTable')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry a RouteTable body.
        if response.status_code == 200:
            deserialized = self._deserialize('RouteTable', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.RouteTable",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RouteTable"]:
        """Create or updates a route table in a specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to the create or update route table operation.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.RouteTable
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTable"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fire the initial PUT; the cls lambda returns the raw pipeline
            # response so the poller can track the operation from its headers.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial request; must not leak into the poller kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final GET of the LRO yields the resulting RouteTable.
            deserialized = self._deserialize('RouteTable', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def _update_tags_initial(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.RouteTable":
        """Send the initial PATCH request of the update-tags long-running
        operation and deserialize its 200 body into a ``RouteTable``."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTable"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteTable', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    async def begin_update_tags(
        self,
        resource_group_name: str,
        route_table_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RouteTable"]:
        """Updates a route table tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to update route table tags.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTable"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fire the initial PATCH; cls lambda keeps the raw pipeline
            # response for the poller.
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial request; must not leak into the poller kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('RouteTable', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # NOTE: no lro_options here -- this operation uses the default
        # final-state behavior of AsyncARMPolling.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.RouteTableListResult"]:
        """Gets all route tables in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first-page request from the operation URL, or a
            # follow-up request from the server-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}  # type: ignore

    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.RouteTableListResult"]:
        """Gets all route tables in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteTableListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first-page request from the operation URL, or a
            # follow-up request from the server-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('RouteTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}  # type: ignore
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# RecordingChooser tests
import unittest
import sys ; sys.path.append("..")
from TopologyPVR import RecordingChooser
from TopologyPVR import directionNormaliser
from TopologyPVR import timestamper
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess
class RecordingChooser_Internal_InitialisationTests(unittest.TestCase):
    """Construction smoke tests: RecordingChooser must build cleanly."""

    def test_Instantiate_NoArgs(self):
        "__init__ - Creating is fine"
        chooser = RecordingChooser()

    def test_Instiantiate_WithWinding(self):
        "__init__ - Creating with winding=true is fine"
        chooser = RecordingChooser(winding=True)
class RecordingChooser_Internal_IterateTests(unittest.TestCase):
def __preroll(self, *arg, **argd):
    """Build a RecordingChooser wired to a sink component.

    Returns (chooser, target, execute) where *execute* is the scheduler's
    main generator; tests step it manually with .next().
    Extra args/kwargs are forwarded to the RecordingChooser constructor.
    """
    # NOTE(review): replaces the class-level 'run' scheduler with a fresh
    # instance so each test starts with a clean run queue -- presumably a
    # Kamaelia idiom; confirm against Axon.Scheduler.
    Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
    chooser = RecordingChooser(*arg, **argd).activate()
    target = Axon.Component.component().activate()
    # Route chooser's data and shutdown signals into the sink for inspection.
    chooser.link( (chooser, "outbox"), (target, "inbox") )
    chooser.link( (chooser, "signal"), (target, "control") )
    execute = Axon.Scheduler.scheduler.run.main()
    return chooser, target, execute
def test_shutdown(self):
    """Shuts down in response to a shutdownMicroprocess message"""
    # Both producerFinished and shutdownMicroprocess must terminate the
    # component and be forwarded out of its "signal" outbox.
    for msg in [producerFinished(self), shutdownMicroprocess(self)]:
        chooser = RecordingChooser().activate()
        # A few idle cycles: nothing should be emitted yet.
        for _ in xrange(0,10):
            chooser.next()
        self.assert_(0==len(chooser.outboxes["outbox"]))
        self.assert_(0==len(chooser.outboxes["signal"]))
        chooser._deliver( msg, "control" )
        try:
            for _ in xrange(0,10):
                chooser.next()
            # Component should have stopped (StopIteration) within 10 cycles.
            self.fail()
        except StopIteration:
            pass
        # Shutdown message is passed through on "signal"; "outbox" stays empty.
        self.assert_(0==len(chooser.outboxes["outbox"]))
        self.assert_(1==len(chooser.outboxes["signal"]))
        received = chooser._collect("signal")
        self.assert_( msg == received )
def test_nooutputifempty(self):
    """Does not output anything if empty"""
    chooser, target, execute = self.__preroll()

    def run_for_a_bit():
        # Give the scheduler several cycles to settle.
        for _ in xrange(1, 10):
            execute.next()

    # With no items loaded, idling must emit nothing...
    run_for_a_bit()
    self.assert_(not target.dataReady("inbox"))
    # ...and a NEXT request against an empty chooser must emit nothing either.
    chooser._deliver("NEXT", "inbox")
    run_for_a_bit()
    self.assert_(not target.dataReady("inbox"))
def test_simpleiterateforwards(self):
    """If filled with 'next' items, then you iterate forwards, you get them all out, but no more than that"""
    chooser, target, execute = self.__preroll()
    payload = ['a','b','1','8']
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Preload the chooser; loading alone must not emit anything.
    for item in payload:
        chooser._deliver(item, "nextItems")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Each NEXT emits exactly one item, in load order.
    for item in payload:
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # One NEXT past the end yields nothing.
    chooser._deliver("NEXT", "inbox")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
def test_iterateforwards(self):
    """You can continue to fill with items whilst getting them out"""
    chooser, target, execute = self.__preroll()
    payload = ['a','b','1','8']
    payload2 = ['p','q','7','36']
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # First batch loaded; loading emits nothing.
    for item in payload:
        chooser._deliver(item, "nextItems")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Drain the first batch one NEXT at a time.
    for item in payload:
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # A second batch can be appended after the first has been consumed.
    for item in payload2:
        chooser._deliver(item, "nextItems")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    for item in payload2:
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # Past the end of both batches: nothing more.
    chooser._deliver("NEXT", "inbox")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
def test_deferrediterateforwards(self):
    """If you iterate beyond the end, then as new items arrive they'll be output"""
    chooser, target, execute = self.__preroll()
    payload = ['a','b','1','8']
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Queue up NEXT requests before any items exist; each produces nothing yet.
    for item in payload:
        chooser._deliver("NEXT", "inbox")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # As items arrive they satisfy the pending NEXTs one by one.
    for item in payload:
        chooser._deliver(item, "nextItems")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
def test_iterateforwardsbackwards(self):
    """You can iterate forwards, then back then forwards, etc"""
    chooser, target, execute = self.__preroll()
    payload = ['a','b','1','8']
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    for item in payload:
        chooser._deliver(item, "nextItems")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Forwards through the whole payload.
    for item in payload:
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # Backwards: current (last) item is not re-emitted.
    for item in reversed(payload[:-1]): # nb last item not repeated
        chooser._deliver("PREV", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # Forwards again: current (first) item is not re-emitted.
    for item in payload[1:]: # nb first item not repeated
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
def test_backtrackatendendstop(self):
    """If you iterate forwards past the end, then reverse, you won't get the last item repeated"""
    chooser, target, execute = self.__preroll()
    payload = ['a','b','1','8']
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    for item in payload:
        chooser._deliver(item, "nextItems")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Forwards through everything.
    for item in payload:
        chooser._deliver("NEXT", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
        for e in xrange(1,10): execute.next()
        self.assert_(not target.dataReady("inbox"))
    # One NEXT past the end: nothing.
    chooser._deliver("NEXT", "inbox")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # First PREV after overshooting: the last item is NOT re-emitted.
    chooser._deliver("PREV", "inbox")
    for e in xrange(1,10): execute.next()
    self.assert_(not target.dataReady("inbox"))
    # Subsequent PREVs walk back through the remaining items.
    for item in reversed(payload[:-1]):
        chooser._deliver("PREV", "inbox")
        for e in xrange(1,10): execute.next()
        self.assert_(target.dataReady("inbox"), "Expected item to be ready")
        self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
def test_backtrackatstartendstop(self):
"""If you iterate backwards past the front, then go forwards again, you won't get the first item repeated"""
chooser, target, execute = self.__preroll()
payload = ['a','b','1','8']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("NEXT", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in reversed(payload[:-1]):
chooser._deliver("NEXT", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
def test_jumpToFirstLast(self):
"""You can jump to the first or last item. With winding off, the item will be re-emitted if youre' already there"""
chooser, target, execute = self.__preroll()
payload = ['a','b','1','8']
for item in payload:
chooser._deliver(item, "nextItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(payload[0] == target.recv("inbox"))
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(payload[0] == target.recv("inbox"))
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("LAST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(payload[-1] == target.recv("inbox"))
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(payload[0] == target.recv("inbox"))
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
def test_winding(self):
"""With winding on, jumping will wind to the first or last item, emitting all items on the way. If you're already there, nothing is emitted"""
chooser, target, execute = self.__preroll(winding=True)
payload = ['a','b','1','8']
for item in payload:
chooser._deliver(item, "nextItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(payload[0] == target.recv("inbox"))
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("LAST", "inbox")
for e in xrange(1,100): execute.next()
for item in payload[1:]:
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
chooser._deliver("LAST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,100): execute.next()
for item in reversed(payload[:-1]):
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
def test_simpleiteratebackwards(self):
"""If filled with 'prev' items, then you iterate backwards, you get them all out, but no more than that"""
chooser, target, execute = self.__preroll()
payload = ['a','b','1','8']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
def test_iteratebackwards(self):
"""You can continue to fill with items (backwards) whilst getting them out"""
chooser, target, execute = self.__preroll()
payload = ['a','b','1','8']
payload2 = ['p','q','7','36']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload2:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload2:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
def test_deferrediteratebackwards(self):
"""If you iterate beyond the end, then as new items arrive they'll be output"""
chooser, target, execute = self.__preroll()
payload = ['a','b','1','8']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in payload:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"), "Expected item to be ready")
self.assert_(item == target.recv("inbox"), "Expected "+str(item)+" to be emitted")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
def test_startinmiddle(self):
"""If you add items to the front and back, then iterating, you start in the middle"""
chooser, target, execute = self.__preroll()
prev = ['a','b','c','d']
next = ['1','2','3','4','5']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in prev:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in next:
chooser._deliver(item, "nextItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in next:
chooser._deliver("NEXT", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
for item in reversed(next[:-1]):
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
for item in prev:
chooser._deliver("PREV", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
for item in reversed(prev[:-1]):
chooser._deliver("NEXT", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(item == target.recv("inbox"))
self.assert_(not target.dataReady("inbox"))
def test_startinmiddle_jumptofirst(self):
"""If you add items to the front and back, then jump to first, you start at that point"""
chooser, target, execute = self.__preroll()
prev = ['a','b','c','d']
next = ['1','2','3','4','5']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in prev:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in next:
chooser._deliver(item, "nextItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("FIRST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(prev[-1] == target.recv("inbox"))
def test_startinmiddle_jumptolast(self):
"""If you add items to the front and back, then jump to last, you start at that point"""
chooser, target, execute = self.__preroll()
prev = ['a','b','c','d']
next = ['1','2','3','4','5']
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in prev:
chooser._deliver(item, "prevItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
for item in next:
chooser._deliver(item, "nextItems")
for e in xrange(1,10): execute.next()
self.assert_(not target.dataReady("inbox"))
chooser._deliver("LAST", "inbox")
for e in xrange(1,10): execute.next()
self.assert_(target.dataReady("inbox"))
self.assert_(next[-1] == target.recv("inbox"))
class timestamperTests(unittest.TestCase):
    """Unit tests for the timestamper component."""

    def __preroll(self, *arg, **argd):
        # Fresh scheduler per test; wire the timestamper to a passive sink.
        Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
        stamper = timestamper(*arg, **argd).activate()
        sink = Axon.Component.component().activate()
        stamper.link( (stamper, "outbox"), (sink, "inbox") )
        stamper.link( (stamper, "signal"), (sink, "control") )
        stepper = Axon.Scheduler.scheduler.run.main()
        return stamper, sink, stepper

    def test_shutdown(self):
        """Shuts down in response to a shutdownMicroprocess message"""
        for msg in [producerFinished(self), shutdownMicroprocess(self)]:
            stamper = timestamper().activate()
            for _ in xrange(0,10):
                stamper.next()
            self.assert_(0==len(stamper.outboxes["outbox"]))
            self.assert_(0==len(stamper.outboxes["signal"]))
            stamper._deliver( msg, "control" )
            # The component should terminate within a few cycles...
            try:
                for _ in xrange(0,10):
                    stamper.next()
                self.fail()
            except StopIteration:
                pass
            # ...passing the shutdown message on, untouched, via "signal".
            self.assert_(0==len(stamper.outboxes["outbox"]))
            self.assert_(1==len(stamper.outboxes["signal"]))
            self.assert_( msg == stamper._collect("signal") )

    def test_noinputnooutput(self):
        """No input, no output"""
        stamper, sink, stepper = self.__preroll()
        for _ in range(0,100):
            stepper.next()
            self.assert_( not sink.dataReady("inbox") )
            self.assert_( not sink.dataReady("control") )

    def test_outputisinputtupledwithtimestamp(self):
        """Output is (timestamp, input)"""
        stamper, sink, stepper = self.__preroll()
        data = ("flurble", "plig", 7)
        stamper._deliver( data, "inbox")
        for _ in range(0,10):
            stepper.next()
        self.assert_( sink.dataReady("inbox") )
        (ts, d) = sink.recv("inbox")
        self.assert_( d == data )

    def test_timestampsascend(self):
        """Timestamps ascend. No two are the same."""
        stamper, sink, stepper = self.__preroll()
        items1 = [ 'a', ('b',7), 'c', 'd', 'e' ]
        items2 = [ 9, 4, 2, None ]
        # Deliver two batches with scheduler activity in between, so the
        # batches are stamped at different times.
        for item in items1:
            stamper._deliver( item, "inbox")
        for _ in range(0,100):
            stepper.next()
        for item in items2:
            stamper._deliver( item, "inbox")
        for _ in range(0,10):
            stepper.next()
        self.assert_( sink.dataReady("inbox") )
        (prevts, d) = sink.recv("inbox")
        self.assert_( d == items1[0] )
        # Every subsequent timestamp must be strictly greater.
        for item in items1[1:] + items2:
            self.assert_( sink.dataReady("inbox") )
            (ts, d) = sink.recv("inbox")
            self.assert_( d == item )
            self.assert_( prevts < ts )
            prevts = ts
class directionNormaliser_Tests(unittest.TestCase):
    """Unit tests for the directionNormaliser component."""

    def __preroll(self, *arg, **argd):
        # Fresh scheduler per test; wire the normaliser to a passive sink.
        Axon.Scheduler.scheduler.run = Axon.Scheduler.scheduler()
        unit = directionNormaliser(*arg, **argd).activate()
        sink = Axon.Component.component().activate()
        unit.link( (unit, "outbox"), (sink, "inbox") )
        unit.link( (unit, "signal"), (sink, "control") )
        stepper = Axon.Scheduler.scheduler.run.main()
        return unit, sink, stepper

    def test_shutdown(self):
        """Shuts down in response to a shutdownMicroprocess message"""
        for msg in [producerFinished(self), shutdownMicroprocess(self)]:
            unit = directionNormaliser().activate()
            for _ in xrange(0,10):
                unit.next()
            self.assert_(0==len(unit.outboxes["outbox"]))
            self.assert_(0==len(unit.outboxes["signal"]))
            unit._deliver( msg, "control" )
            # The component should terminate within a few cycles...
            try:
                for _ in xrange(0,10):
                    unit.next()
                self.fail()
            except StopIteration:
                pass
            # ...passing the shutdown message on, untouched, via "signal".
            self.assert_(0==len(unit.outboxes["outbox"]))
            self.assert_(1==len(unit.outboxes["signal"]))
            self.assert_( msg == unit._collect("signal") )

    def test_behaviour1(self):
        """Does not output anything if nothing is passed in"""
        unit, sink, stepper = self.__preroll()
        for _ in xrange(1,10):
            stepper.next()
        self.assert_(not sink.dataReady("inbox"))

    def test_behaviour1a(self):
        """For the first item, always the 2nd item of the tuple is returned"""
        unit, sink, stepper = self.__preroll()
        item = (5,'p','P')
        unit._deliver( item, "inbox" )
        for _ in xrange(1,10):
            stepper.next()
        self.assert_(sink.dataReady("inbox"))
        self.assert_(sink.recv("inbox") == item[1])

    def test_behaviour2(self):
        """When you go forwards through timestamps, the 2nd item of the tuple is returned"""
        unit, sink, stepper = self.__preroll()
        for item in [ (1,'a','A'), (2,'b','B'), (3,'c','C'), (4,'d','D') ]:
            unit._deliver( item, "inbox" )
            for _ in xrange(1,10):
                stepper.next()
            self.assert_(sink.dataReady("inbox"))
            self.assert_(sink.recv("inbox") == item[1])

    def test_behaviour3(self):
        """When you go backwards through timestamps, the 3rd item of the tuple is returned."""
        unit, sink, stepper = self.__preroll()
        # Prime with a first item; its 2nd tuple element comes out.
        item = (5,'p','P')
        unit._deliver( item, "inbox" )
        for _ in xrange(1,10):
            stepper.next()
        self.assert_(sink.dataReady("inbox"))
        self.assert_(sink.recv("inbox") == item[1])
        # Descending timestamps: the 3rd tuple element comes out.
        for item in [ (4,'a','A'), (3,'b','B'), (2,'c','C'), (1,'d','D') ]:
            unit._deliver( item, "inbox" )
            for _ in xrange(1,10):
                stepper.next()
            self.assert_(sink.dataReady("inbox"))
            self.assert_(sink.recv("inbox") == item[2])
# Run the whole test suite when this file is executed directly.
if __name__=='__main__':
    unittest.main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import shutil
import tensorflow as tf
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
def _AddEvents(path):
  """Writes an empty fake tfevents file under `path` and returns its path.

  The directory (and any missing parents) is created first if needed.
  """
  if not tf.gfile.IsDirectory(path):
    tf.gfile.MakeDirs(path)
  event_path = os.path.join(path, 'hypothetical.tfevents.out')
  with tf.gfile.GFile(event_path, 'w') as event_file:
    event_file.write('')
  return event_path
def _CreateCleanDirectory(path):
  """Ensures `path` exists as an empty directory, wiping any prior contents."""
  if tf.gfile.IsDirectory(path):
    tf.gfile.DeleteRecursively(path)
  tf.gfile.MkDir(path)
class _FakeAccumulator(object):
  """Test double for EventAccumulator backed by canned plugin data."""

  def __init__(self, path):
    """Constructs a fake accumulator with some fake events.

    Args:
      path: The path for the run that this accumulator is for.
    """
    self._path = path
    self.reload_called = False
    self._plugin_to_tag_to_content = {
        'baz_plugin': {
            'foo': 'foo_content',
            'bar': 'bar_content',
        }
    }

  def Tags(self):
    # This fake exposes no tags of any category.
    return {}

  def FirstEventTimestamp(self):
    return 0

  def _TagHelper(self, tag_name, enum):
    # Tags() is empty, so this raises KeyError for any lookup.
    if tag_name not in self.Tags()[enum]:
      raise KeyError
    return ['%s/%s' % (self._path, tag_name)]

  def Tensors(self, tag_name):
    return self._TagHelper(tag_name, event_accumulator.TENSORS)

  def PluginTagToContent(self, plugin_name):
    # Prefix each run with this accumulator's path and '_' so that tests can
    # verify that tags are associated with the correct runs.
    prefixed = {}
    for run, content_mapping in self._plugin_to_tag_to_content[plugin_name].items():
      prefixed[self._path + '_' + run] = content_mapping
    return prefixed

  def Reload(self):
    self.reload_called = True
def _GetFakeAccumulator(path, size_guidance=None, tensor_size_guidance=None,
                        purge_orphaned_data=None):
  """Factory matching EventAccumulator's signature; returns a fake.

  The tuning keyword arguments exist only for signature compatibility with
  the real constructor and are ignored.
  """
  del size_guidance, tensor_size_guidance, purge_orphaned_data  # Unused.
  return _FakeAccumulator(path)
class EventMultiplexerTest(tf.test.TestCase):
  """Tests EventMultiplexer against the stubbed-out fake accumulator."""

  def setUp(self):
    super(EventMultiplexerTest, self).setUp()
    # Replace the real EventAccumulator with the fake factory for all tests.
    self.stubs = tf.test.StubOutForTesting()
    self.stubs.Set(event_accumulator, 'EventAccumulator', _GetFakeAccumulator)

  def tearDown(self):
    self.stubs.CleanUp()
    # Mirror setUp: also run the base class's teardown.
    super(EventMultiplexerTest, self).tearDown()

  def testEmptyLoader(self):
    """Tests empty EventMultiplexer creation."""
    x = event_multiplexer.EventMultiplexer()
    self.assertEqual(x.Runs(), {})

  def testRunNamesRespected(self):
    """Tests two EventAccumulators inserted/accessed in EventMultiplexer."""
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'run2'])
    self.assertEqual(x.GetAccumulator('run1')._path, 'path1')
    self.assertEqual(x.GetAccumulator('run2')._path, 'path2')

  def testReload(self):
    """EventAccumulators should Reload after EventMultiplexer call it."""
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertFalse(x.GetAccumulator('run1').reload_called)
    self.assertFalse(x.GetAccumulator('run2').reload_called)
    x.Reload()
    self.assertTrue(x.GetAccumulator('run1').reload_called)
    self.assertTrue(x.GetAccumulator('run2').reload_called)

  def testPluginRunToTagToContent(self):
    """Tests the method that produces the run to tag to content mapping."""
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    # The fake accumulator prefixes tags with its path (see _FakeAccumulator).
    self.assertDictEqual({
        'run1': {
            'path1_foo': 'foo_content',
            'path1_bar': 'bar_content',
        },
        'run2': {
            'path2_foo': 'foo_content',
            'path2_bar': 'bar_content',
        }
    }, x.PluginRunToTagToContent('baz_plugin'))

  def testExceptions(self):
    """KeyError should be raised when accessing non-existing keys."""
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    with self.assertRaises(KeyError):
      x.Tensors('sv1', 'xxx')

  def testInitialization(self):
    """Tests EventMultiplexer is created properly with its params."""
    x = event_multiplexer.EventMultiplexer()
    self.assertEqual(x.Runs(), {})
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertItemsEqual(x.Runs(), ['run1', 'run2'])
    self.assertEqual(x.GetAccumulator('run1')._path, 'path1')
    self.assertEqual(x.GetAccumulator('run2')._path, 'path2')

  def testAddRunsFromDirectory(self):
    """Tests AddRunsFromDirectory function.

    Tests the following scenarios:
    - When the directory does not exist.
    - When the directory is empty.
    - When the directory has empty subdirectory.
    - Contains proper EventAccumulators after adding events.
    """
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    fakedir = join(tmpdir, 'fake_accumulator_directory')
    realdir = join(tmpdir, 'real_accumulator_directory')
    self.assertEqual(x.Runs(), {})
    x.AddRunsFromDirectory(fakedir)
    self.assertEqual(x.Runs(), {}, 'loading fakedir had no effect')
    _CreateCleanDirectory(realdir)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'loading empty directory had no effect')
    path1 = join(realdir, 'path1')
    tf.gfile.MkDir(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'creating empty subdirectory had no effect')
    _AddEvents(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1'], 'loaded run: path1')
    loader1 = x.GetAccumulator('path1')
    self.assertEqual(loader1._path, path1, 'has the correct path')
    path2 = join(realdir, 'path2')
    _AddEvents(path2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2'])
    # Re-adding the same directory must not rebuild existing accumulators.
    self.assertEqual(
        x.GetAccumulator('path1'), loader1, 'loader1 not regenerated')
    path2_2 = join(path2, 'path2')
    _AddEvents(path2_2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2', 'path2/path2'])
    self.assertEqual(
        x.GetAccumulator('path2/path2')._path, path2_2, 'loader2 path correct')

  def testAddRunsFromDirectoryThatContainsEvents(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    self.assertEqual(x.Runs(), {})
    # Events directly in the added directory surface as the run '.'.
    _AddEvents(realdir)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['.'])
    subdir = join(realdir, 'subdir')
    _AddEvents(subdir)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['.', 'subdir'])

  def testAddRunsFromDirectoryWithRunNames(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    self.assertEqual(x.Runs(), {})
    # An explicit name prefixes every discovered run.
    _AddEvents(realdir)
    x.AddRunsFromDirectory(realdir, 'foo')
    self.assertItemsEqual(x.Runs(), ['foo/.'])
    subdir = join(realdir, 'subdir')
    _AddEvents(subdir)
    x.AddRunsFromDirectory(realdir, 'foo')
    self.assertItemsEqual(x.Runs(), ['foo/.', 'foo/subdir'])

  def testAddRunsFromDirectoryWalksTree(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    _AddEvents(realdir)
    sub = join(realdir, 'subdirectory')
    sub1 = join(sub, '1')
    sub2 = join(sub, '2')
    sub1_1 = join(sub1, '1')
    _AddEvents(sub1)
    _AddEvents(sub2)
    _AddEvents(sub1_1)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['.', 'subdirectory/1', 'subdirectory/2',
                                     'subdirectory/1/1'])

  def testAddRunsFromDirectoryThrowsException(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    # Passing a file (not a directory) must be rejected.
    filepath = _AddEvents(tmpdir)
    with self.assertRaises(ValueError):
      x.AddRunsFromDirectory(filepath)

  def testAddRun(self):
    x = event_multiplexer.EventMultiplexer()
    x.AddRun('run1_path', 'run1')
    run1 = x.GetAccumulator('run1')
    self.assertEqual(sorted(x.Runs().keys()), ['run1'])
    self.assertEqual(run1._path, 'run1_path')
    # Same path again: the existing accumulator is kept.
    x.AddRun('run1_path', 'run1')
    self.assertEqual(run1, x.GetAccumulator('run1'), 'loader not recreated')
    # New path under the same run name: the accumulator is replaced.
    x.AddRun('run2_path', 'run1')
    new_run1 = x.GetAccumulator('run1')
    self.assertEqual(new_run1._path, 'run2_path')
    self.assertNotEqual(run1, new_run1)
    # With no explicit name the path doubles as the run name.
    x.AddRun('runName3')
    self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'runName3'])
    self.assertEqual(x.GetAccumulator('runName3')._path, 'runName3')

  def testAddRunMaintainsLoading(self):
    x = event_multiplexer.EventMultiplexer()
    x.Reload()
    # Runs added after a Reload() are loaded immediately.
    x.AddRun('run1')
    x.AddRun('run2')
    self.assertTrue(x.GetAccumulator('run1').reload_called)
    self.assertTrue(x.GetAccumulator('run2').reload_called)
class EventMultiplexerWithRealAccumulatorTest(tf.test.TestCase):
  """Tests EventMultiplexer against the real (unstubbed) accumulator."""

  def testDeletingDirectoryRemovesRun(self):
    multiplexer = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    run_names = ['run1', 'run2', 'run3']
    run_dirs = [os.path.join(tmpdir, name) for name in run_names]
    for run_dir in run_dirs:
      _AddEvents(run_dir)
    for name, run_dir in zip(run_names, run_dirs):
      multiplexer.AddRun(run_dir, name)
    multiplexer.Reload()
    # Delete the directory, then reload.
    shutil.rmtree(run_dirs[1])
    multiplexer.Reload()
    self.assertNotIn('run2', multiplexer.Runs().keys())
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| |
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for publishing WebRTC AAR on Bintray.
Set BINTRAY_USER and BINTRAY_API_KEY environment variables before running
this script for authentication.
"""
import argparse
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
sys.path.append(os.path.join(CHECKOUT_ROOT, 'third_party'))
import requests
import jinja2
sys.path.append(os.path.join(CHECKOUT_ROOT, 'tools_webrtc'))
from android.build_aar import BuildAar
ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
MAVEN_REPOSITORY = 'https://google.bintray.com/webrtc'
API = 'https://api.bintray.com'
PACKAGE_PATH = 'google/webrtc/google-webrtc'
CONTENT_API = API + '/content/' + PACKAGE_PATH
PACKAGES_API = API + '/packages/' + PACKAGE_PATH
GROUP_ID = 'org/webrtc'
ARTIFACT_ID = 'google-webrtc'
COMMIT_POSITION_REGEX = r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$'
API_TIMEOUT_SECONDS = 10.0
UPLOAD_TRIES = 3
# The sleep time is increased exponentially.
UPLOAD_RETRY_BASE_SLEEP_SECONDS = 2.0
GRADLEW_BIN = os.path.join(CHECKOUT_ROOT,
'examples/androidtests/third_party/gradle/gradlew')
ADB_BIN = os.path.join(CHECKOUT_ROOT,
'third_party/android_sdk/public/platform-tools/adb')
AAR_PROJECT_DIR = os.path.join(CHECKOUT_ROOT, 'examples/aarproject')
AAR_PROJECT_GRADLE = os.path.join(AAR_PROJECT_DIR, 'build.gradle')
AAR_PROJECT_APP_GRADLE = os.path.join(AAR_PROJECT_DIR, 'app', 'build.gradle')
AAR_PROJECT_DEPENDENCY = "implementation 'org.webrtc:google-webrtc:1.0.+'"
AAR_PROJECT_VERSION_DEPENDENCY = "implementation 'org.webrtc:google-webrtc:%s'"
def _ParseArgs():
  """Defines and parses the command line flags of this script."""
  arg_parser = argparse.ArgumentParser(
      description='Releases WebRTC on Bintray.')
  arg_parser.add_argument('--use-goma', action='store_true', default=False,
                          help='Use goma.')
  arg_parser.add_argument('--skip-tests', action='store_true', default=False,
                          help='Skips running the tests.')
  arg_parser.add_argument('--publish', action='store_true', default=False,
                          help='Automatically publishes the library if the tests pass.')
  arg_parser.add_argument('--build-dir', default=None,
                          help='Temporary directory to store the build files. If not specified, '
                          'a new directory will be created.')
  arg_parser.add_argument('--verbose', action='store_true', default=False,
                          help='Debug logging.')
  return arg_parser.parse_args()
def _GetCommitHash():
  """Returns the HEAD commit hash of the WebRTC checkout."""
  return subprocess.check_output(
      ['git', 'rev-parse', 'HEAD'], cwd=CHECKOUT_ROOT).strip()
def _GetCommitPos():
  """Extracts the Cr-Commit-Position number from the HEAD commit message.

  Raises:
    Exception: If the commit message carries no Cr-Commit-Position footer.
  """
  commit_message = subprocess.check_output(
      ['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'],
      cwd=CHECKOUT_ROOT)
  match = re.search(COMMIT_POSITION_REGEX, commit_message, re.MULTILINE)
  if match:
    return match.group(1)
  raise Exception('Commit position not found in the commit message: %s'
                  % commit_message)
def _UploadFile(user, password, filename, version, target_file):
  """Uploads a file to the Bintray content API, retrying on timeouts.

  Args:
    user: Bintray user name.
    password: Bintray API key.
    filename: Local path of the file to upload.
    version: Version string the file belongs to.
    target_file: File name to use on the server.

  Raises:
    Exception: If the upload times out on every attempt or the server
      rejects it.
  """
  # URL is of format:
  # <repository_api>/<version>/<group_id>/<artifact_id>/<version>/<target_file>
  # Example:
  # https://api.bintray.com/content/google/webrtc/google-webrtc/1.0.19742/org/webrtc/google-webrtc/1.0.19742/google-webrtc-1.0.19742.aar
  target_dir = version + '/' + GROUP_ID + '/' + ARTIFACT_ID + '/' + version
  target_path = target_dir + '/' + target_file
  url = CONTENT_API + '/' + target_path

  logging.info('Uploading %s to %s', filename, url)
  # Read in binary mode: AAR and POM payloads must be uploaded byte-exact
  # (text mode would corrupt them on some platforms).
  with open(filename, 'rb') as fh:
    file_data = fh.read()
  for attempt in xrange(UPLOAD_TRIES):
    try:
      response = requests.put(url, data=file_data, auth=(user, password),
                              timeout=API_TIMEOUT_SECONDS)
      break
    except requests.exceptions.Timeout as e:
      logging.warning('Timeout while uploading: %s', e)
      if attempt < UPLOAD_TRIES - 1:
        # The sleep time is increased exponentially. Skip the sleep after
        # the final attempt, since we give up right away.
        time.sleep(UPLOAD_RETRY_BASE_SLEEP_SECONDS ** attempt)
  else:
    raise Exception('Failed to upload %s' % filename)

  if not response.ok:
    raise Exception('Failed to upload %s. Response: %s' % (filename, response))
  logging.info('Uploaded %s: %s', filename, response)
def _GeneratePom(target_file, version, commit):
  """Renders the Maven POM template and writes it to `target_file`."""
  env = jinja2.Environment(loader=jinja2.PackageLoader('release_aar'))
  pom_text = env.get_template('pom.jinja').render(version=version,
                                                  commit=commit)
  with open(target_file, 'w') as fh:
    fh.write(pom_text)
def _TestAAR(tmp_dir, username, password, version):
  """Runs AppRTCMobile tests using the AAR. Returns true if the tests pass.

  Temporarily points the example project at the just-uploaded (still
  unpublished) Maven artifact; the original gradle files are backed up into
  `tmp_dir` and restored afterwards, even on failure.

  Args:
    tmp_dir: Directory used to store backups of the gradle files.
    username: Bintray user name, used to access the authenticated repository.
    password: Bintray API key.
    version: Version of the artifact to test, e.g. '1.0.19742'.
  """
  logging.info('Testing library.')
  env = jinja2.Environment(
      loader=jinja2.PackageLoader('release_aar'),
  )

  gradle_backup = os.path.join(tmp_dir, 'build.gradle.backup')
  app_gradle_backup = os.path.join(tmp_dir, 'app-build.gradle.backup')

  # Make backup copies of the project files before modifying them.
  shutil.copy2(AAR_PROJECT_GRADLE, gradle_backup)
  shutil.copy2(AAR_PROJECT_APP_GRADLE, app_gradle_backup)

  try:
    # Render the authenticated repository block from the template.
    maven_repository_template = env.get_template('maven-repository.jinja')
    maven_repository = maven_repository_template.render(
        url=MAVEN_REPOSITORY, username=username, password=password)

    # Append Maven repository to build file to download unpublished files.
    with open(AAR_PROJECT_GRADLE, 'a') as gradle_file:
      gradle_file.write(maven_repository)

    # Read app build file.
    with open(AAR_PROJECT_APP_GRADLE, 'r') as gradle_app_file:
      gradle_app = gradle_app_file.read()

    if AAR_PROJECT_DEPENDENCY not in gradle_app:
      raise Exception(
          '%s not found in the build file.' % AAR_PROJECT_DEPENDENCY)
    # Set version to the version to be tested.
    target_dependency = AAR_PROJECT_VERSION_DEPENDENCY % version
    gradle_app = gradle_app.replace(AAR_PROJECT_DEPENDENCY, target_dependency)

    # Write back.
    with open(AAR_PROJECT_APP_GRADLE, 'w') as gradle_app_file:
      gradle_app_file.write(gradle_app)

    # Uninstall any existing version of AppRTCMobile.
    logging.info('Uninstalling previous AppRTCMobile versions. It is okay for '
                 'these commands to fail if AppRTCMobile is not installed.')
    subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc'])
    subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test'])

    # Run tests.
    try:
      # First clean the project.
      subprocess.check_call([GRADLEW_BIN, 'clean'], cwd=AAR_PROJECT_DIR)
      # Then run the tests.
      subprocess.check_call([GRADLEW_BIN, 'connectedDebugAndroidTest'],
                            cwd=AAR_PROJECT_DIR)
    except subprocess.CalledProcessError:
      logging.exception('Test failure.')
      return False  # Clean or tests failed

    return True  # Tests pass
  finally:
    # Restore backups.
    shutil.copy2(gradle_backup, AAR_PROJECT_GRADLE)
    shutil.copy2(app_gradle_backup, AAR_PROJECT_APP_GRADLE)
def _PublishAAR(user, password, version, additional_args):
  """Publishes (or discards) the uploaded version via the content API.

  Args:
    user: Bintray user name.
    password: Bintray API key.
    version: Version string to publish.
    additional_args: Extra JSON fields for the request body, e.g.
      {'discard': True} to discard instead of publish.
  """
  payload = {
      'publish_wait_for_secs': 0  # Publish asynchronously.
  }
  payload.update(additional_args)
  publish_url = CONTENT_API + '/' + version + '/publish'
  response = requests.post(publish_url, data=json.dumps(payload),
                           auth=(user, password),
                           timeout=API_TIMEOUT_SECONDS)
  if not response.ok:
    raise Exception('Failed to publish. Response: %s' % response)
def _DeleteUnpublishedVersion(user, password, version):
  """Deletes `version` from Bintray unless it has already been published."""
  version_url = PACKAGES_API + '/versions/' + version
  auth = (user, password)
  response = requests.get(version_url, auth=auth,
                          timeout=API_TIMEOUT_SECONDS)
  if not response.ok:
    raise Exception('Failed to get version info. Response: %s' % response)
  # Never delete a version that already went live.
  if json.loads(response.content)['published']:
    logging.info('Version has already been published, not deleting.')
    return
  logging.info('Deleting unpublished version.')
  response = requests.delete(version_url, auth=auth,
                             timeout=API_TIMEOUT_SECONDS)
  if not response.ok:
    raise Exception('Failed to delete version. Response: %s' % response)
def ReleaseAar(use_goma, skip_tests, publish, build_dir):
  """Builds, uploads, tests and optionally publishes an AAR release.

  Args:
    use_goma: Whether to use goma for the build.
    skip_tests: If True, the uploaded AAR is not smoke-tested on a device.
    publish: If True, the uploaded version is published automatically.
    build_dir: Directory to build in. If falsy, a temporary directory is
      created and cleaned up afterwards.

  Raises:
    Exception: If BINTRAY_USER/BINTRAY_API_KEY are unset, or if the device
      tests fail (in which case the uploaded version is discarded first).
  """
  version = '1.0.' + _GetCommitPos()
  commit = _GetCommitHash()
  logging.info('Releasing AAR version %s with hash %s', version, commit)
  user = os.environ.get('BINTRAY_USER', None)
  api_key = os.environ.get('BINTRAY_API_KEY', None)
  if not user or not api_key:
    raise Exception('Environment variables BINTRAY_USER and BINTRAY_API_KEY '
                    'must be defined.')
  # If build directory is not specified, create a temporary directory.
  use_tmp_dir = not build_dir
  if use_tmp_dir:
    build_dir = tempfile.mkdtemp()
  try:
    base_name = ARTIFACT_ID + '-' + version
    aar_file = os.path.join(build_dir, base_name + '.aar')
    third_party_licenses_file = os.path.join(build_dir, 'LICENSE.md')
    pom_file = os.path.join(build_dir, base_name + '.pom')
    logging.info('Building at %s', build_dir)
    BuildAar(ARCHS, aar_file,
             use_goma=use_goma,
             ext_build_dir=os.path.join(build_dir, 'aar-build'))
    _GeneratePom(pom_file, version, commit)
    # Upload the artifacts first; tests below run against the uploaded AAR.
    _UploadFile(user, api_key, aar_file, version, base_name + '.aar')
    _UploadFile(user, api_key, third_party_licenses_file, version,
                'THIRD_PARTY_LICENSES.md')
    _UploadFile(user, api_key, pom_file, version, base_name + '.pom')
    tests_pass = skip_tests or _TestAAR(build_dir, user, api_key, version)
    if not tests_pass:
      # Discard and delete the bad upload so it can never be published.
      logging.info('Discarding library.')
      _PublishAAR(user, api_key, version, {'discard': True})
      _DeleteUnpublishedVersion(user, api_key, version)
      raise Exception('Test failure. Discarded library.')
    if publish:
      logging.info('Publishing library.')
      _PublishAAR(user, api_key, version, {})
    else:
      # Bug fix: message previously read 'has not not been published'.
      logging.info('Note: The library has not been published automatically.'
                   ' Please do so manually if desired.')
  finally:
    if use_tmp_dir:
      shutil.rmtree(build_dir, True)
def main():
  """Command-line entry point: parse flags, configure logging, run release."""
  args = _ParseArgs()
  log_level = logging.DEBUG if args.verbose else logging.INFO
  logging.basicConfig(level=log_level)
  ReleaseAar(args.use_goma, args.skip_tests, args.publish, args.build_dir)


if __name__ == '__main__':
  sys.exit(main())
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
# Force CPU-only execution so results are reproducible across machines.
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
  """End-to-end tests for TFLite's bidirectional RNN support.

  Each test trains a small bidirectional RNN on MNIST in a TF1 graph,
  rebuilds the graph in inference mode (batch size 1), converts it to
  TFLite, and checks that the TFLite output matches the TF session output.
  """

  def __init__(self, *args, **kwargs):
    super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
    # Define constants
    # Unrolled through 28 time steps
    self.time_steps = 28
    # Rows of 28 pixels
    self.n_input = 28
    # Learning rate for Adam optimizer
    self.learning_rate = 0.001
    # MNIST is meant to be classified in 10 classes(0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # Rnn Units.
    self.num_units = 16

  def setUp(self):
    super(BidirectionalSequenceRnnTest, self).setUp()
    # Import MNIST dataset
    data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    self.mnist = input_data.read_data_sets(data_dir, one_hot=True)

  def buildRnnLayer(self):
    # Two stacked TFLite-compatible RNN cells; used for both directions.
    return tf.keras.layers.StackedRNNCells([
        tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
        tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
    ])

  def buildModel(self,
                 fw_rnn_layer,
                 bw_rnn_layer,
                 is_dynamic_rnn,
                 is_inference,
                 use_sequence_length=False):
    """Build Mnist recognition model.

    Args:
      fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
        rnn cell.
      bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
        rnn cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: If True, build the graph with batch size 1 instead of
        self.batch_size (inference mode).
      use_sequence_length: Whether to use sequence length or not. Default to
        False.

    Returns:
      A tuple containing:

      - Input tensor of the model.
      - Prediction tensor of the model.
      - Output class tensor of the model.
    """
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))
    batch_size = self.batch_size
    if is_inference:
      batch_size = 1
    # input image placeholder
    x = tf.placeholder(
        "float", [batch_size, self.time_steps, self.n_input],
        name="INPUT_IMAGE")
    sequence_length = None
    if use_sequence_length:
      sequence_length = [self.time_steps] * batch_size
    if is_dynamic_rnn:
      # dynamic_rnn is fed time-major input: [time, batch, features].
      rnn_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          sequence_length,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      # Concatenate forward/backward outputs and keep the last time step.
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for static since we don't have a
      # wrapper for it. At training phase, we can still have sequence_length,
      # but inference phase, we change it to None.
      if is_inference:
        sequence_length = None
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          dtype="float32",
          sequence_length=sequence_length)
      output = outputs[-1]
    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
    return x, prediction, output_class

  def trainModel(self, x, prediction, output_class, sess):
    """Train the model.

    Args:
      x: The input tensor.
      prediction: The prediction class tensor.
      output_class: The output tensor.
      sess: The graph session.
    """
    # input label placeholder
    y = tf.placeholder("float", [None, self.n_classes])
    # Loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Optimization
    opt = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(loss)
    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)
    for _ in range(TRAIN_STEPS):
      batch_x, batch_y = self.mnist.train.next_batch(
          batch_size=self.batch_size, shuffle=False)
      batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                 self.n_input))
      sess.run(opt, feed_dict={x: batch_x, y: batch_y})

  def saveAndRestoreModel(self,
                          fw_rnn_layer,
                          bw_rnn_layer,
                          sess,
                          saver,
                          is_dynamic_rnn,
                          use_sequence_length=False):
    """Saves and restores the model to mimic the most common use case.

    Args:
      fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
        rnn cell.
      bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
        rnn cell.
      sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver()
      is_dynamic_rnn: Use dynamic_rnn or not.
      use_sequence_length: Whether to use sequence length or not. Default to
        False.

    Returns:
      A tuple containing:

      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - new session of the restored model.
    """
    model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    saver.save(sess, model_dir)
    # Reset the graph.
    tf.reset_default_graph()
    # Rebuild the graph in inference mode (is_inference=True -> batch size 1)
    # before restoring the trained weights into it.
    x, prediction, output_class = self.buildModel(
        fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)
    new_sess = tf.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess

  def getInferenceResult(self, x, output_class, sess):
    """Get inference result given input tensor and output tensor.

    Args:
      x: The input tensor.
      output_class: The output tensor.
      sess: Current session.

    Returns:
      A tuple containing:

      - Input of the next batch, batch size is 1.
      - Expected output.
    """
    b1, _ = self.mnist.train.next_batch(batch_size=1)
    sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
    expected_output = sess.run(output_class, feed_dict={x: sample_input})
    return sample_input, expected_output

  def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
    """Get tflite inference result.

    This method will convert tensorflow from session to tflite model then based
    on the inputs, run tflite inference and return the results.

    Args:
      sess: Current tensorflow session.
      test_inputs: The test inputs for tflite.
      input_tensor: The input tensor of tensorflow graph.
      output_tensor: The output tensor of tensorflow graph.

    Returns:
      The tflite inference result.
    """
    converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
                                                     [output_tensor])
    tflite = converter.convert()
    interpreter = tf.lite.Interpreter(model_content=tflite)
    interpreter.allocate_tensors()
    input_index = interpreter.get_input_details()[0]["index"]
    interpreter.set_tensor(input_index, test_inputs)
    interpreter.invoke()
    output_index = interpreter.get_output_details()[0]["index"]
    result = interpreter.get_tensor(output_index)
    # Reset all variables so it will not pollute other inferences.
    interpreter.reset_all_variables()
    return result

  def testStaticRnnMultiRnnCell(self):
    """Static bidirectional RNN: TF and TFLite outputs must agree."""
    sess = tf.Session(config=CONFIG)
    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
    self.trainModel(x, prediction, output_class, sess)
    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)
    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)
    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  def testStaticRnnMultiRnnCellWithSequenceLength(self):
    """Static bidirectional RNN with sequence_length supplied at training."""
    sess = tf.Session(config=CONFIG)
    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        False,
        is_inference=False,
        use_sequence_length=True)
    self.trainModel(x, prediction, output_class, sess)
    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        False,
        use_sequence_length=True)
    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)
    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  @test_util.enable_control_flow_v2
  def testDynamicRnnMultiRnnCell(self):
    """Dynamic bidirectional RNN (requires control flow v2 for conversion)."""
    sess = tf.Session(config=CONFIG)
    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
    self.trainModel(x, prediction, output_class, sess)
    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        is_dynamic_rnn=True)
    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)
    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  @test_util.enable_control_flow_v2
  def testDynamicRnnMultiRnnCellWithSequenceLength(self):
    """Dynamic bidirectional RNN with sequence_length supplied at training."""
    sess = tf.Session(config=CONFIG)
    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        True,
        is_inference=False,
        use_sequence_length=True)
    self.trainModel(x, prediction, output_class, sess)
    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        is_dynamic_rnn=True,
        use_sequence_length=True)
    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)
    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
| |
# -*- coding: utf-8 -*-
from tempfile import NamedTemporaryFile
from cerberus import Validator, errors
from cerberus.tests import (assert_fail, assert_has_error, assert_normalized,
assert_success)
def test_coerce():
    # A plain callable given as 'coerce' is applied before validation.
    doc_in = {'amount': '1'}
    doc_out = {'amount': 1}
    assert_normalized(doc_in, doc_out, {'amount': {'coerce': int}})
def test_coerce_in_subschema():
    # Coercion also applies inside nested 'schema' definitions.
    schema = {'thing': {'type': 'dict',
                        'schema': {'amount': {'coerce': int}}}}
    document = {'thing': {'amount': '2'}}
    expected = {'thing': {'amount': 2}}
    assert_normalized(document, expected, schema)
def test_coerce_not_destructive():
    # Normalization must operate on a copy, leaving the input dict intact.
    schema = {
        'amount': {'coerce': int}
    }
    v = Validator(schema)
    doc = {'amount': '1'}
    v.validate(doc)
    assert v.document is not doc
def test_coerce_catches_ValueError():
    # A coercer raising ValueError must surface as COERCION_FAILED.
    schema = {'amount': {'coerce': int}}
    _errors = assert_fail({'amount': 'not_a_number'}, schema)
    _errors[0].info = () # ignore exception message here
    assert_has_error(_errors, 'amount', ('amount', 'coerce'),
                     errors.COERCION_FAILED, int)
def test_coerce_catches_TypeError():
    # A coercer raising TypeError must surface as COERCION_FAILED as well.
    schema = {'name': {'coerce': str.lower}}
    _errors = assert_fail({'name': 1234}, schema)
    _errors[0].info = () # ignore exception message here
    assert_has_error(_errors, 'name', ('name', 'coerce'),
                     errors.COERCION_FAILED, str.lower)
def test_coerce_unknown():
    # 'allow_unknown' may itself carry normalization rules for unknown keys.
    schema = {'foo': {'schema': {}, 'allow_unknown': {'coerce': int}}}
    document = {'foo': {'bar': '0'}}
    expected = {'foo': {'bar': 0}}
    assert_normalized(document, expected, schema)
def test_custom_coerce_and_rename():
    # Custom coercers / rename handlers are looked up by name as
    # _normalize_coerce_<name> methods on the Validator subclass.
    class MyNormalizer(Validator):
        def __init__(self, multiplier, *args, **kwargs):
            super(MyNormalizer, self).__init__(*args, **kwargs)
            self.multiplier = multiplier
        def _normalize_coerce_multiply(self, value):
            return value * self.multiplier
    v = MyNormalizer(2, {'foo': {'coerce': 'multiply'}})
    assert v.normalized({'foo': 2})['foo'] == 4
    v = MyNormalizer(3, allow_unknown={'rename_handler': 'multiply'})
    assert v.normalized({3: None}) == {9: None}
def test_coerce_chain():
    # A list of coercers is applied left to right: 15 -> '0xf' -> 'f' -> 'F'.
    drop_prefix = lambda x: x[2:]
    upper = lambda x: x.upper()
    schema = {'foo': {'coerce': [hex, drop_prefix, upper]}}
    assert_normalized({'foo': 15}, {'foo': 'F'}, schema)
def test_coerce_chain_aborts(validator):
    # The chain must stop at the first failing coercer.
    def dont_do_me(value):
        raise AssertionError('The coercion chain did not abort after an '
                             'error.')
    schema = {'foo': {'coerce': [hex, dont_do_me]}}
    validator({'foo': '0'}, schema)
    assert errors.COERCION_FAILED in validator._errors
def test_coerce_non_digit_in_sequence(validator):
    # https://github.com/pyeve/cerberus/issues/211
    schema = {'data': {'type': 'list',
                       'schema': {'type': 'integer', 'coerce': int}}}
    document = {'data': ['q']}
    assert validator.validated(document, schema) is None
    assert (validator.validated(document, schema, always_return_document=True)
            == document)  # noqa: W503
def test_nullables_dont_fail_coerce():
    # None passes through untouched when the field is nullable.
    schema = {'foo': {'coerce': int, 'nullable': True, 'type': 'integer'}}
    document = {'foo': None}
    assert_normalized(document, document, schema)
def test_normalized():
    schema = {'amount': {'coerce': int}}
    document = {'amount': '2'}
    expected = {'amount': 2}
    assert_normalized(document, expected, schema)
def test_rename(validator):
    schema = {'foo': {'rename': 'bar'}}
    document = {'foo': 0}
    expected = {'bar': 0}
    # We cannot use assertNormalized here since there is bug where
    # Cerberus says that the renamed field is an unknown field:
    # {'bar': 'unknown field'}
    validator(document, schema, False)
    assert validator.document == expected
def test_rename_handler():
    # With allow_unknown, rename_handler coerces the unknown keys themselves.
    validator = Validator(allow_unknown={'rename_handler': int})
    schema = {}
    document = {'0': 'foo'}
    expected = {0: 'foo'}
    assert_normalized(document, expected, schema, validator)
def test_purge_unknown():
    # purge_unknown drops keys that are not declared in the schema.
    validator = Validator(purge_unknown=True)
    schema = {'foo': {'type': 'string'}}
    document = {'bar': 'foo'}
    expected = {}
    assert_normalized(document, expected, schema, validator)
def test_purge_unknown_in_subschema():
    schema = {'foo': {'type': 'dict',
                      'schema': {'foo': {'type': 'string'}},
                      'purge_unknown': True}}
    document = {'foo': {'bar': ''}}
    expected = {'foo': {}}
    assert_normalized(document, expected, schema)
def test_issue_147_complex():
    # Unknown values that cannot be deep-copied (an open file handle) must
    # survive normalization by reference.
    schema = {'revision': {'coerce': int}}
    document = {'revision': '5', 'file': NamedTemporaryFile(mode='w+')}
    document['file'].write(r'foobar')
    document['file'].seek(0)
    normalized = Validator(schema, allow_unknown=True).normalized(document)
    assert normalized['revision'] == 5
    assert normalized['file'].read() == 'foobar'
    document['file'].close()
    normalized['file'].close()
def test_issue_147_nested_dict():
    # Containers are copied, but untouched leaf values keep their identity.
    schema = {'thing': {'type': 'dict',
                        'schema': {'amount': {'coerce': int}}}}
    ref_obj = '2'
    document = {'thing': {'amount': ref_obj}}
    normalized = Validator(schema).normalized(document)
    assert document is not normalized
    assert normalized['thing']['amount'] == 2
    assert ref_obj == '2'
    assert document['thing']['amount'] is ref_obj
def test_coerce_in_valueschema():
    # https://github.com/pyeve/cerberus/issues/155
    schema = {'thing': {'type': 'dict',
                        'valueschema': {'coerce': int,
                                        'type': 'integer'}}}
    document = {'thing': {'amount': '2'}}
    expected = {'thing': {'amount': 2}}
    assert_normalized(document, expected, schema)
def test_coerce_in_keyschema():
    # https://github.com/pyeve/cerberus/issues/155
    schema = {'thing': {'type': 'dict',
                        'keyschema': {'coerce': int, 'type': 'integer'}}}
    document = {'thing': {'5': 'foo'}}
    expected = {'thing': {5: 'foo'}}
    assert_normalized(document, expected, schema)
def test_coercion_of_sequence_items(validator):
    # https://github.com/pyeve/cerberus/issues/161
    schema = {'a_list': {'type': 'list', 'schema': {'type': 'float',
                                                    'coerce': float}}}
    document = {'a_list': [3, 4, 5]}
    expected = {'a_list': [3.0, 4.0, 5.0]}
    assert_normalized(document, expected, schema, validator)
    for x in validator.document['a_list']:
        assert isinstance(x, float)
# The test pairs below exercise 'default' and 'default_setter' through
# shared _test_* helpers so both rules get identical coverage.
def test_default_missing():
    _test_default_missing({'default': 'bar_value'})
def test_default_setter_missing():
    _test_default_missing({'default_setter': lambda doc: 'bar_value'})
def _test_default_missing(default):
    # A missing field is filled in from its default (or default_setter).
    bar_schema = {'type': 'string'}
    bar_schema.update(default)
    schema = {'foo': {'type': 'string'},
              'bar': bar_schema}
    document = {'foo': 'foo_value'}
    expected = {'foo': 'foo_value', 'bar': 'bar_value'}
    assert_normalized(document, expected, schema)
def test_default_existent():
    _test_default_existent({'default': 'bar_value'})
def test_default_setter_existent():
    def raise_error(doc):
        raise RuntimeError('should not be called')
    _test_default_existent({'default_setter': raise_error})
def _test_default_existent(default):
    # An existing value must never be overridden by the default.
    bar_schema = {'type': 'string'}
    bar_schema.update(default)
    schema = {'foo': {'type': 'string'},
              'bar': bar_schema}
    document = {'foo': 'foo_value', 'bar': 'non_default'}
    assert_normalized(document, document.copy(), schema)
def test_default_none_nullable():
    _test_default_none_nullable({'default': 'bar_value'})
def test_default_setter_none_nullable():
    def raise_error(doc):
        raise RuntimeError('should not be called')
    _test_default_none_nullable({'default_setter': raise_error})
def _test_default_none_nullable(default):
    # An explicit None on a nullable field counts as present: no default.
    bar_schema = {'type': 'string',
                  'nullable': True}
    bar_schema.update(default)
    schema = {'foo': {'type': 'string'},
              'bar': bar_schema}
    document = {'foo': 'foo_value', 'bar': None}
    assert_normalized(document, document.copy(), schema)
def test_default_none_nonnullable():
    # Fixed copy-paste bug: both non-nullable tests previously delegated to
    # _test_default_none_nullable, so the _test_default_none_nonnullable
    # helper was dead code and the non-nullable case went untested.
    _test_default_none_nonnullable({'default': 'bar_value'})
def test_default_setter_none_nonnullable():
    _test_default_none_nonnullable(
        {'default_setter': lambda doc: 'bar_value'})
def _test_default_none_nonnullable(default):
    # With nullable=False, an existing non-None value is left untouched.
    bar_schema = {'type': 'string',
                  'nullable': False}
    bar_schema.update(default)
    schema = {'foo': {'type': 'string'},
              'bar': bar_schema}
    document = {'foo': 'foo_value', 'bar': 'bar_value'}
    assert_normalized(document, document.copy(), schema)
def test_default_none_default_value():
    # None is a legitimate default value for a nullable field.
    schema = {'foo': {'type': 'string'},
              'bar': {'type': 'string',
                      'nullable': True,
                      'default': None}}
    document = {'foo': 'foo_value'}
    expected = {'foo': 'foo_value', 'bar': None}
    assert_normalized(document, expected, schema)
def test_default_missing_in_subschema():
    _test_default_missing_in_subschema({'default': 'bar_value'})
def test_default_setter_missing_in_subschema():
    _test_default_missing_in_subschema(
        {'default_setter': lambda doc: 'bar_value'})
def _test_default_missing_in_subschema(default):
    # Defaults are applied inside nested 'schema' definitions as well.
    bar_schema = {'type': 'string'}
    bar_schema.update(default)
    schema = {'thing': {'type': 'dict',
                        'schema': {'foo': {'type': 'string'},
                                   'bar': bar_schema}}}
    document = {'thing': {'foo': 'foo_value'}}
    expected = {'thing': {'foo': 'foo_value',
                          'bar': 'bar_value'}}
    assert_normalized(document, expected, schema)
def test_depending_default_setters():
    # Setters may reference fields filled in by other setters; cerberus
    # resolves the evaluation order (a -> b -> c -> d).
    schema = {
        'a': {'type': 'integer'},
        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1},
        'c': {'type': 'integer', 'default_setter': lambda d: d['b'] * 2},
        'd': {'type': 'integer',
              'default_setter': lambda d: d['b'] + d['c']}
    }
    document = {'a': 1}
    expected = {'a': 1, 'b': 2, 'c': 4, 'd': 6}
    assert_normalized(document, expected, schema)
def test_circular_depending_default_setters(validator):
    # Mutually dependent setters cannot be resolved and must error out.
    schema = {
        'a': {'type': 'integer', 'default_setter': lambda d: d['b'] + 1},
        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1}
    }
    validator({}, schema)
    assert errors.SETTING_DEFAULT_FAILED in validator._errors
# Regression tests for upstream issue reports; the exact schemas, documents
# and expected error tuples pin the behavior being guarded.
def test_issue_250():
    # https://github.com/pyeve/cerberus/issues/250
    schema = {
        'list': {
            'type': 'list',
            'schema': {
                'type': 'dict',
                'allow_unknown': True,
                'schema': {'a': {'type': 'string'}}
            }
        }
    }
    document = {'list': {'is_a': 'mapping'}}
    assert_fail(document, schema,
                error=('list', ('list', 'type'), errors.BAD_TYPE,
                       schema['list']['type']))
def test_issue_250_no_type_pass_on_list():
    # https://github.com/pyeve/cerberus/issues/250
    schema = {
        'list': {
            'schema': {
                'allow_unknown': True,
                'type': 'dict',
                'schema': {'a': {'type': 'string'}}
            }
        }
    }
    document = {'list': [{'a': 'known', 'b': 'unknown'}]}
    assert_normalized(document, document, schema)
def test_issue_250_no_type_fail_on_dict():
    # https://github.com/pyeve/cerberus/issues/250
    schema = {
        'list': {
            'schema': {
                'allow_unknown': True,
                'schema': {'a': {'type': 'string'}}
            }
        }
    }
    document = {'list': {'a': {'a': 'known'}}}
    assert_fail(document, schema,
                error=('list', ('list', 'schema'), errors.BAD_TYPE_FOR_SCHEMA,
                       schema['list']['schema']))
def test_issue_250_no_type_fail_pass_on_other():
    # https://github.com/pyeve/cerberus/issues/250
    schema = {
        'list': {
            'schema': {
                'allow_unknown': True,
                'schema': {'a': {'type': 'string'}}
            }
        }
    }
    document = {'list': 1}
    assert_normalized(document, document, schema)
def test_allow_unknown_with_of_rules():
    # https://github.com/pyeve/cerberus/issues/251
    schema = {
        'test': {
            'oneof': [
                {
                    'type': 'dict',
                    'allow_unknown': True,
                    'schema': {'known': {'type': 'string'}}
                },
                {
                    'type': 'dict',
                    'schema': {'known': {'type': 'string'}}
                },
            ]
        }
    }
    # check regression and that allow unknown does not cause any different
    # than expected behaviour for one-of.
    document = {'test': {'known': 's'}}
    assert_fail(document, schema,
                error=('test', ('test', 'oneof'),
                       errors.ONEOF, schema['test']['oneof']))
    # check that allow_unknown is actually applied
    document = {'test': {'known': 's', 'unknown': 'asd'}}
    assert_success(document, schema)
def test_271_normalising_tuples():
    # https://github.com/pyeve/cerberus/issues/271
    # Tuples must survive normalization as tuples, not become lists.
    schema = {
        'my_field': {
            'type': 'list',
            'schema': {'type': ('string', 'number', 'dict')}
        }
    }
    document = {'my_field': ('foo', 'bar', 42, 'albert',
                             'kandinsky', {'items': 23})}
    assert_success(document, schema)
    normalized = Validator(schema).normalized(document)
    assert normalized['my_field'] == ('foo', 'bar', 42, 'albert',
                                      'kandinsky', {'items': 23})
def test_allow_unknown_wo_schema():
    # https://github.com/pyeve/cerberus/issues/302
    v = Validator({'a': {'type': 'dict', 'allow_unknown': True}})
    v({'a': {}})
| |
from __future__ import unicode_literals
import os.path
import subprocess
import sys
import re
from .common import FileDownloader
from ..compat import compat_setenv
from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
from ..utils import (
cli_option,
cli_valueless_option,
cli_bool_option,
cli_configuration_args,
encodeFilename,
encodeArgument,
handle_youtubedl_headers,
check_executable,
)
class ExternalFD(FileDownloader):
    """Base class for downloaders that shell out to an external program.

    Subclasses define AVAILABLE_OPT and either implement _make_cmd()
    (command-line construction) or override _call_downloader() entirely.
    """

    def real_download(self, filename, info_dict):
        """Download info_dict['url'] to filename via the external program.

        Returns True on success (exit code 0), False otherwise.
        """
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        retval = self._call_downloader(tmpfilename, info_dict)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('%s exited with code %d' % (
                self.get_basename(), retval))
            return False

    @classmethod
    def get_basename(cls):
        # CurlFD -> 'curl', Aria2cFD -> 'aria2c', ... (strip the 'FD' suffix).
        return cls.__name__[:-2].lower()

    @property
    def exe(self):
        # Name/path of the external binary, as configured by the user.
        return self.params.get('external_downloader')

    @classmethod
    def available(cls):
        return check_executable(cls.get_basename(), [cls.AVAILABLE_OPT])

    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')

    @classmethod
    def can_download(cls, info_dict):
        return cls.available() and cls.supports(info_dict)

    def _option(self, command_option, param):
        return cli_option(self.params, command_option, param)

    def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
        return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)

    def _valueless_option(self, command_option, param, expected_value=True):
        return cli_valueless_option(self.params, command_option, param, expected_value)

    def _configuration_args(self, default=None):
        # Bug fix: the default used to be a mutable `[]` shared between calls;
        # use None as the sentinel instead (passing a list still works).
        if default is None:
            default = []
        return cli_configuration_args(self.params, 'external_downloader_args', default)

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
        cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
        self._debug_cmd(cmd)
        # Capture stderr so it is only shown when the program fails.
        p = subprocess.Popen(
            cmd, stderr=subprocess.PIPE)
        _, stderr = p.communicate()
        if p.returncode != 0:
            self.to_stderr(stderr.decode('utf-8', 'replace'))
        return p.returncode
class CurlFD(ExternalFD):
    """Downloader backed by the curl(1) command-line tool."""
    AVAILABLE_OPT = '-V'

    def _make_cmd(self, tmpfilename, info_dict):
        # Base invocation: follow redirects, write to the temporary file.
        args = [self.exe, '--location', '-o', tmpfilename]
        for name, value in info_dict['http_headers'].items():
            args.extend(['--header', '%s: %s' % (name, value)])
        # Map youtube-dl options onto the matching curl flags.
        args.extend(self._bool_option('--continue-at', 'continuedl', '-', '0'))
        args.extend(self._valueless_option('--silent', 'noprogress'))
        args.extend(self._valueless_option('--verbose', 'verbose'))
        args.extend(self._option('--limit-rate', 'ratelimit'))
        args.extend(self._option('--retry', 'retries'))
        args.extend(self._option('--max-filesize', 'max_filesize'))
        args.extend(self._option('--interface', 'source_address'))
        args.extend(self._option('--proxy', 'proxy'))
        args.extend(self._valueless_option('--insecure', 'nocheckcertificate'))
        args.extend(self._configuration_args())
        # '--' ends option parsing so the URL cannot be mistaken for a flag.
        args.extend(['--', info_dict['url']])
        return args

    def _call_downloader(self, tmpfilename, info_dict):
        argv = [encodeArgument(part) for part in self._make_cmd(tmpfilename, info_dict)]
        self._debug_cmd(argv)
        # curl writes the progress to stderr so don't capture it.
        proc = subprocess.Popen(argv)
        proc.communicate()
        return proc.returncode
class AxelFD(ExternalFD):
    """Downloader backed by the axel(1) download accelerator."""
    AVAILABLE_OPT = '-V'

    def _make_cmd(self, tmpfilename, info_dict):
        args = [self.exe, '-o', tmpfilename]
        for name, value in info_dict['http_headers'].items():
            args.extend(['-H', '%s: %s' % (name, value)])
        args.extend(self._configuration_args())
        args.extend(['--', info_dict['url']])
        return args
class WgetFD(ExternalFD):
    """Downloader backed by GNU wget."""
    AVAILABLE_OPT = '--version'

    def _make_cmd(self, tmpfilename, info_dict):
        # -nv: non-verbose; cookies are handled by youtube-dl itself.
        args = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
        for name, value in info_dict['http_headers'].items():
            args.extend(['--header', '%s: %s' % (name, value)])
        args.extend(self._option('--bind-address', 'source_address'))
        args.extend(self._option('--proxy', 'proxy'))
        args.extend(self._valueless_option('--no-check-certificate', 'nocheckcertificate'))
        args.extend(self._configuration_args())
        args.extend(['--', info_dict['url']])
        return args
class Aria2cFD(ExternalFD):
    """Downloader backed by aria2c(1)."""
    AVAILABLE_OPT = '-v'

    def _make_cmd(self, tmpfilename, info_dict):
        # -c: continue a partially downloaded file.
        args = [self.exe, '-c']
        args.extend(self._configuration_args(
            ['--min-split-size', '1M', '--max-connection-per-server', '4']))
        # aria2c takes the destination as directory + output name.
        target_dir = os.path.dirname(tmpfilename)
        if target_dir:
            args.extend(['--dir', target_dir])
        args.extend(['--out', os.path.basename(tmpfilename)])
        for name, value in info_dict['http_headers'].items():
            args.extend(['--header', '%s: %s' % (name, value)])
        args.extend(self._option('--interface', 'source_address'))
        args.extend(self._option('--all-proxy', 'proxy'))
        args.extend(self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '='))
        args.extend(['--', info_dict['url']])
        return args
class HttpieFD(ExternalFD):
    """Downloader backed by HTTPie's `http` command."""
    @classmethod
    def available(cls):
        # The executable name ('http') differs from the basename ('httpie').
        return check_executable('http', ['--version'])

    def _make_cmd(self, tmpfilename, info_dict):
        args = ['http', '--download', '--output', tmpfilename, info_dict['url']]
        # HTTPie takes headers as trailing 'Name:value' arguments.
        for name, value in info_dict['http_headers'].items():
            args.append('%s:%s' % (name, value))
        return args
class FFmpegFD(ExternalFD):
    """Downloader that delegates the whole transfer (and remuxing) to
    ffmpeg/avconv; used for streaming protocols such as m3u8/rtsp/rtmp/mms."""
    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')

    @classmethod
    def available(cls):
        # Availability depends on a working ffmpeg or avconv installation.
        return FFmpegPostProcessor().available

    def _call_downloader(self, tmpfilename, info_dict):
        """Run ffmpeg/avconv to fetch info_dict['url'] into tmpfilename.

        Returns the process exit code (0 on success), or False if no
        ffmpeg/avconv binary could be found at all.
        """
        url = info_dict['url']
        ffpp = FFmpegPostProcessor(downloader=self)
        if not ffpp.available:
            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
            return False
        ffpp.check_version()
        # -y: overwrite the (already reserved) temporary output file.
        args = [ffpp.executable, '-y']
        args += self._configuration_args()
        # start_time = info_dict.get('start_time') or 0
        # if start_time:
        #     args += ['-ss', compat_str(start_time)]
        # end_time = info_dict.get('end_time')
        # if end_time:
        #     args += ['-t', compat_str(end_time - start_time)]
        if info_dict['http_headers'] and re.match(r'^https?://', url):
            # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
            # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
            headers = handle_youtubedl_headers(info_dict['http_headers'])
            args += [
                '-headers',
                ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
        env = None
        proxy = self.params.get('proxy')
        if proxy:
            if not re.match(r'^[\da-zA-Z]+://', proxy):
                proxy = 'http://%s' % proxy
            if proxy.startswith('socks'):
                self.report_warning(
                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())
            # Since December 2015 ffmpeg supports -http_proxy option (see
            # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
            # We could switch to the following code if we are able to detect version properly
            # args += ['-http_proxy', proxy]
            env = os.environ.copy()
            compat_setenv('HTTP_PROXY', proxy, env=env)
            compat_setenv('http_proxy', proxy, env=env)
        protocol = info_dict.get('protocol')
        if protocol == 'rtmp':
            # Map rtmpdump-style metadata onto ffmpeg's -rtmp_* input options.
            player_url = info_dict.get('player_url')
            page_url = info_dict.get('page_url')
            app = info_dict.get('app')
            play_path = info_dict.get('play_path')
            tc_url = info_dict.get('tc_url')
            flash_version = info_dict.get('flash_version')
            live = info_dict.get('rtmp_live', False)
            if player_url is not None:
                args += ['-rtmp_swfverify', player_url]
            if page_url is not None:
                args += ['-rtmp_pageurl', page_url]
            if app is not None:
                args += ['-rtmp_app', app]
            if play_path is not None:
                args += ['-rtmp_playpath', play_path]
            if tc_url is not None:
                args += ['-rtmp_tcurl', tc_url]
            if flash_version is not None:
                args += ['-rtmp_flashver', flash_version]
            if live:
                args += ['-rtmp_live', 'live']
        # '-c copy': remux only, never re-encode.
        args += ['-i', url, '-c', 'copy']
        if protocol in ('m3u8', 'm3u8_native'):
            if self.params.get('hls_use_mpegts', False) or tmpfilename == '-':
                args += ['-f', 'mpegts']
            else:
                args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
        elif protocol == 'rtmp':
            args += ['-f', 'flv']
        else:
            args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
        args = [encodeArgument(opt) for opt in args]
        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
        self._debug_cmd(args)
        # stdin is a pipe so we can send ffmpeg the 'q' (quit) command below.
        proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
        try:
            retval = proc.wait()
        except KeyboardInterrupt:
            # subprocces.run would send the SIGKILL signal to ffmpeg and the
            # mp4 file couldn't be played, but if we ask ffmpeg to quit it
            # produces a file that is playable (this is mostly useful for live
            # streams). Note that Windows is not affected and produces playable
            # files (see https://github.com/rg3/youtube-dl/issues/8300).
            if sys.platform != 'win32':
                proc.communicate(b'q')
            raise
        return retval
class AVconvFD(FFmpegFD):
    # Reuses FFmpegFD's implementation unchanged; only the class name
    # differs, which presumably changes the basename returned by
    # get_basename() (and hence the executable invoked) -- confirm in
    # ExternalFD.
    pass
# Registry of downloader classes keyed by their basename, collected from
# every module-level name ending in 'FD' except ExternalFD itself.
_BY_NAME = dict(
    (klass.get_basename(), klass)
    for name, klass in globals().items()
    if name.endswith('FD') and name != 'ExternalFD'
)
def list_external_downloaders():
    """Return the basenames of all supported external downloaders, sorted."""
    return sorted(_BY_NAME)
def get_external_downloader(external_downloader):
    """Map an executable name (or path) to the downloader class for it."""
    # Strip any directory part, then drop the .exe extension on Windows,
    # before looking the basename up in the registry.
    basename = os.path.basename(external_downloader)
    root = os.path.splitext(basename)[0]
    return _BY_NAME[root]
| |
#!/usr/bin/env python
# Use Python3 integer division rules.
from __future__ import division
import sys
import os
import unittest
import warnings
from numpy import testing, pi, array, matrix, sin, cos, zeros, array, mat, \
arctan
from yeadon.solid import Stadium, Solid, StadiumSolid
from yeadon import inertia
warnings.filterwarnings('ignore', category=DeprecationWarning)
class StadiumSolidCheck(unittest.TestCase):
    """To check the formulae in yeadon.solid against those in the Yeadon1990-ii
    paper.
    """
    # NOTE(review): this subclasses unittest.TestCase but replaces __init__
    # with a non-TestCase signature, so it can only be used as a plain helper
    # object, never collected as a test case -- confirm this is intentional.
    def __init__(self, density, thick0, rad0, thick1, rad1, height):
        # Geometry of a linearly tapered stadium solid; subscript 0 is one
        # end stadium, subscript 1 the other.
        self.D = density   # uniform density
        self.t0 = thick0   # thickness of stadium 0
        self.r0 = rad0     # radius of stadium 0
        self.t1 = thick1   # thickness of stadium 1
        self.r1 = rad1     # radius of stadium 1
        self.h = height    # axial distance between the two stadia
        # Taper parameters feeding the F-polynomials below.
        self.a = (self.r1 - self.r0) / self.r0  # fractional radius change
        self.b = (self.t1 - self.t0) / self.t0  # fractional thickness change
    # 's' is used in place of 'self' below to keep the formulae compact.
    def mass(s):
        """Mass of the solid: density times integrated cross-section area."""
        return s.D * s.h * s.r0 * (
            4.0 * s.t0 * s._F1(s.a, s.b) +
            pi * s.r0 * s._F1(s.a, s.a)
        )
    def mass_center(s):
        """Axial position of the center of mass, measured from stadium 0
        (see the flip-symmetry tests below, which rely on this convention)."""
        return s.D * s.h**2 * (
            4.0 * s.r0 * s.t0 * s._F2(s.a, s.b) +
            pi * s.r0**2 * s._F2(s.a, s.a)
        ) / s.mass()
    def inertia_zz(s):
        """About center of mass."""
        return s.D * s.h * (
            4.0 * s.r0 * s.t0**3 * s._F4(s.a, s.b) / 3.0 +
            pi * s.r0**2 * s.t0**2 * s._F5(s.a, s.b) +
            4.0 * s.r0**3 * s.t0 * s._F4(s.b, s.a) +
            pi * s.r0**4 * s._F4(s.a, s.a) / 2.0
        )
    def inertia_yy(s):
        """About center of mass."""
        # Area second moment about y, integrated over the height...
        Jy_integral = (4.0 * s.r0 * s.t0**3 * s._F4(s.a, s.b) / 3.0 +
                pi * s.r0**2 * s.t0**2 * s._F5(s.a, s.b) +
                8.0 * s.r0**3 * s.t0 * s._F4(s.b, s.a) / 3.0 +
                pi * s.r0**4 * s._F4(s.a, s.a) / 4.0
                )
        # ...plus the parallel-axis contribution of area times z squared.
        z2A_integral = (4.0 * s.r0 * s.t0 * s._F3(s.a, s.b) +
                pi * s.r0**2 * s._F3(s.a, s.a))
        about_origin = s.D * s.h * Jy_integral + s.D * s.h**3 * z2A_integral
        # Shift from the stadium-0 origin to the center of mass.
        return about_origin - s.mass() * s.mass_center()**2
    def inertia_xx(s):
        """About center of mass."""
        Jz_integral = (4.0 * s.r0 * s.t0**3 * s._F4(s.a, s.b) / 3.0 +
                pi * s.r0**4 * s._F4(s.a, s.a) / 4.0)
        z2A_integral = (4.0 * s.r0 * s.t0 * s._F3(s.a, s.b) +
                pi * s.r0**2 * s._F3(s.a, s.a))
        about_origin = s.D * s.h * Jz_integral + s.D * s.h**3 * z2A_integral
        return about_origin - s.mass() * s.mass_center()**2
    # _F1.._F5 are polynomial helpers in the taper parameters a and b,
    # presumably obtained by integrating powers of the linear taper over
    # the height -- see the Yeadon1990-ii paper referenced above.
    @staticmethod
    def _F1(a, b):
        return 1.0 + (a + b)/2.0 + a*b/3.0
    @staticmethod
    def _F2(a, b):
        return 0.5 + (a + b)/3.0 + a*b/4.0
    @staticmethod
    def _F3(a, b):
        return 1/3.0 + (a + b)/4.0 + a*b/5.0
    @staticmethod
    def _F4(a, b):
        return (1.0 + (a + 3.0*b)/2.0 + (3.0*a*b + 3.0*b**2)/3.0 +
                (3.0*a*b**2 + b**3)/4.0 + a*b**3/5.0)
    @staticmethod
    def _F5(a, b):
        return (1.0 + (2.0*a + 2.0*b)/2.0 + (a**2 + 4.0*a*b + b**2)/3.0 +
                2.0*a*b*(a + b)/4.0 + a**2 * b**2 / 5.0)
# define some useful functions for 2D stadia
def radius_from_perimeter_width(perimeter, width):
    """Returns the radius of the stadium given the perimeter and width."""
    # Invert p = 2 * w + (2 * pi - 4) * r for r.
    excess = perimeter - 2.0 * width
    return excess / (2 * pi - 4)
def thickness_from_perimeter_width(perimeter, width):
    """Returns the thickness of the stadium given the perimeter and
    width."""
    # t = w / 2 - r, with r recovered from the perimeter/width pair.
    return 0.5 * width - (perimeter - 2.0 * width) / (2 * pi - 4)
def radius_from_depth(depth):
    """Return the stadium radius implied by its depth (half the depth)."""
    return 0.5 * depth
def thickness_from_depth_width(depth, width):
    """Return the stadium thickness implied by its depth and width."""
    half_excess = (width - depth) / 2.0
    return half_excess
def perimeter_from_depth_width(depth, width):
    """Return the stadium perimeter: two straight sides plus a full circle."""
    straight = 2 * (width - depth)
    curved = pi * depth
    return straight + curved
class TestStadium(unittest.TestCase):
    """Unit tests for the Stadium 2D cross-section class."""

    def test_init(self):
        """Checks every supported construction mode of Stadium and the
        derived perimeter/width/radius/thickness values."""
        # perimeter and width
        perimeter = 2.5
        width = 1.0
        pw = Stadium('La6: knuckles', 'perimwidth', perimeter, width)
        assert pw.label == 'La6: knuckles'
        assert pw.alignment == 'ML'
        testing.assert_almost_equal(pw.perimeter, perimeter)
        testing.assert_almost_equal(pw.width, width)
        testing.assert_almost_equal(pw.radius,
                radius_from_perimeter_width(perimeter, width))
        testing.assert_almost_equal(pw.thickness,
                thickness_from_perimeter_width(perimeter, width))
        # depth and width
        depth = 1.0
        width = 5.0
        dw = Stadium('La6: knuckles', 'depthwidth', depth, width, 'AP')
        assert dw.label == 'La6: knuckles'
        assert dw.alignment == 'AP'
        testing.assert_almost_equal(dw.width, width)
        testing.assert_almost_equal(dw.radius, radius_from_depth(depth))
        testing.assert_almost_equal(dw.thickness,
                thickness_from_depth_width(depth, width))
        testing.assert_almost_equal(dw.perimeter,
                perimeter_from_depth_width(depth, width))
        # perimeter only (degenerate stadium: a circle)
        perimeter = 5.0
        p = Stadium('Lk2: mid-thigh', 'perimeter', perimeter)
        assert p.label == 'Lk2: mid-thigh'
        assert p.alignment == 'ML'
        testing.assert_almost_equal(p.perimeter, perimeter)
        testing.assert_almost_equal(p.width, perimeter / pi)
        testing.assert_almost_equal(p.thickness, 0.0)
        testing.assert_almost_equal(p.radius, perimeter / 2.0 / pi)
        # radius only (degenerate stadium: a circle)
        radius = 1.0
        r = Stadium('Lk2: mid-thigh', 'radius', radius)
        assert r.label == 'Lk2: mid-thigh'
        assert r.alignment == 'ML'
        testing.assert_almost_equal(r.radius, radius)
        testing.assert_almost_equal(r.perimeter, 2.0 * pi * radius)
        testing.assert_almost_equal(r.thickness, 0.0)
        testing.assert_almost_equal(r.width, 2 * radius)
        # thickness and radius
        thickness = 10.0
        radius = 1.0
        tr = Stadium('Lk2: mid-thigh', 'thicknessradius', thickness, radius)
        assert tr.label == 'Lk2: mid-thigh'
        assert tr.alignment == 'ML'
        testing.assert_almost_equal(tr.radius, radius)
        testing.assert_almost_equal(tr.thickness, thickness)
        testing.assert_almost_equal(tr.perimeter, 2.0 * pi * radius + 4.0 *
                thickness)
        testing.assert_almost_equal(tr.width, 2 * radius + 2.0 * thickness)

    def test_invalid_stadium(self):
        """Tests that if a stadium is defined in such a way that it is invalid
        (negative radius or negative thickness), the correct action is taken.
        """
        # TODO Redirecting stdout is not working.
        actual_stdout = sys.stdout
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                stad = Stadium('Lb1: mid-arm', 'perimwidth', 1.9, 1.0)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
                assert "incorrectly" in str(w[-1])
                testing.assert_almost_equal(stad.perimeter, 1.9)
                testing.assert_almost_equal(stad.radius, 1.9 / (2.0 * pi))
                testing.assert_almost_equal(stad.thickness, 0.0)
                testing.assert_almost_equal(stad.width, 1.9 / pi)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                stad = Stadium('Lb1: mid-arm', 'perimwidth', 3.15, 1.0)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
                testing.assert_almost_equal(stad.perimeter, 3.15)
                testing.assert_almost_equal(stad.radius, 3.15 / (2.0 * pi))
                testing.assert_almost_equal(stad.thickness, 0.0)
                testing.assert_almost_equal(stad.width, 3.15 / pi)
            width = 1.0
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                depth = (1.9 - 2.0 * width) / (pi - 2.0)
                stad = Stadium('Lb1: mid-arm', 'depthwidth', depth, width)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
                testing.assert_almost_equal(stad.perimeter, pi * width)
                testing.assert_almost_equal(stad.radius, 0.5 * width)
                testing.assert_almost_equal(stad.thickness, 0.0)
                testing.assert_almost_equal(stad.width, width)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                depth = (3.15 - 2.0 * width) / (pi - 2.0)
                stad = Stadium('Lb1: mid-arm', 'depthwidth', depth, width)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
                testing.assert_almost_equal(stad.perimeter, pi * width)
                testing.assert_almost_equal(stad.radius, 0.5 * width)
                testing.assert_almost_equal(stad.thickness, 0.0)
                testing.assert_almost_equal(stad.width, width)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.assertRaises(ValueError, Stadium, 'Lb1: mid-arm',
                        'thicknessradius', -.1, -.5)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.assertRaises(ValueError, Stadium, 'Lb1: mid-arm',
                        'thicknessradius', 1.0, -.3)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.assertRaises(ValueError, Stadium, 'Lb1: mid-arm',
                        'thicknessradius', -.1, 2)
                assert len(w) == 1
                assert issubclass(w[-1].category, UserWarning)
            # Radius cannot be zero.
            self.assertRaises(ValueError, Stadium, 'Lb1: mid-arm',
                    'thicknessradius', -.1, 0)
            self.assertRaises(ValueError, Stadium, 'Lb1: mid-arm',
                    'thicknessradius', 0, 0)
        finally:
            # Restore stdout and close the devnull handle even if an
            # assertion above fails; the original version leaked the open
            # file object and left stdout redirected on failure.
            sys.stdout = actual_stdout
            devnull.close()
def test_solid():
    """Checks the base Solid class: constructor attributes and the effect
    of set_orientation() on position, rotation matrix and end position."""
    label = 'Test'
    density = 3.0
    height = 5.0
    sol = Solid(label, density, height)
    assert sol.label == label
    testing.assert_almost_equal(sol.density, density)
    testing.assert_almost_equal(sol.height, height)
    position = array([1., 2., 3.])
    # body-three 1-2-3
    angles = array([0.34, 23.6, -0.2])
    c1 = cos(angles[0])
    c2 = cos(angles[1])
    c3 = cos(angles[2])
    s1 = sin(angles[0])
    s2 = sin(angles[1])
    s3 = sin(angles[2])
    # definition of body 1-2-3 rotations from Spacecraft Dynamics, Kane,
    # Likins, Levinson, 1982 page 423 (this is the transpose of what is
    # presented)
    C = matrix([[c2 * c3, s1 * s2 * c3 + s3 * c1, -c1 * s2 * c3 + s3 * s1],
                [-c2 * s3, -s1 * s2 * s3 + c3 * c1, c1 * s2 * s3 + c3 *s1],
                [s2, -s1 * c2, c1 * c2]])
    sol.set_orientation(position, C, True)
    testing.assert_allclose(sol.pos, position)
    testing.assert_allclose(sol._rot_mat, C)
    # The end position is the start position plus the height carried along
    # the solid's local z axis, expressed in the global frame.
    testing.assert_allclose(sol.end_pos, position + (height * C * array([[0],
        [0], [1]])))
    # A bare Solid reports a zero inertia matrix -- presumably the mass
    # distribution is only defined by subclasses; confirm in solid.py.
    testing.assert_allclose(sol.inertia, zeros((3, 3)))
    #TODO: complete tests for solid and the remaining classes in solid.py
def test_stadiumsolid_inertial_properties():
    """Checks the inertial property calculations of the StadiumSolid
    against the paper formulae in StadiumSolidCheck, for both stackings
    of the two stadia."""
    density = 1.5
    height = 4
    rad0 = 3
    thick0 = 1
    rad1 = 1
    thick1 = 2
    bottom = Stadium('Ls1: umbilicus', 'thicknessradius', thick0, rad0)
    top = Stadium('Lb1: mid-arm', 'thicknessradius', thick1, rad1)
    # Each case pairs a stadium ordering with the matching check-geometry.
    cases = (
        ((bottom, top), (thick0, rad0, thick1, rad1)),
        ((top, bottom), (thick1, rad1, thick0, rad0)),
    )
    for (stad_a, stad_b), (t_a, r_a, t_b, r_b) in cases:
        sol = StadiumSolid('solid', density, stad_a, stad_b, height)
        ref = StadiumSolidCheck(density, t_a, r_a, t_b, r_b, height)
        testing.assert_almost_equal(sol.mass, ref.mass())
        testing.assert_almost_equal(sol.rel_center_of_mass[2],
                                    ref.mass_center())
        testing.assert_almost_equal(sol.rel_inertia[2, 2], ref.inertia_zz())
        testing.assert_almost_equal(sol.rel_inertia[1, 1], ref.inertia_yy())
        testing.assert_almost_equal(sol.rel_inertia[0, 0], ref.inertia_xx())
def test_stadiumsolidcheck_symmetry():
    """Tests the symmetry of the stadiumsolid formulae, as Yeadon presented
    them (not as implemented). That means, if we switch which stadium we call 0
    or 1, we look at if the mass/volume, center of mass, moments of inertia
    change.
    """
    density = 1.5
    height = 4

    def assert_flip_invariant(t0, r0, t1, r1):
        # Build the solid and its upside-down twin; everything except the
        # center of mass (which mirrors about mid-height) must agree.
        fwd = StadiumSolidCheck(density, t0, r0, t1, r1, height)
        rev = StadiumSolidCheck(density, t1, r1, t0, r0, height)
        testing.assert_almost_equal(fwd.mass(), rev.mass())
        testing.assert_almost_equal(fwd.mass_center(),
                                    height - rev.mass_center())
        testing.assert_almost_equal(fwd.inertia_zz(), rev.inertia_zz())
        testing.assert_almost_equal(fwd.inertia_yy(), rev.inertia_yy())
        testing.assert_almost_equal(fwd.inertia_xx(), rev.inertia_xx())

    # Same top and bottom: mass reduces to density times prism volume.
    rad = 3
    thick = 2
    uniform = StadiumSolidCheck(density, thick, rad, thick, rad, height)
    testing.assert_almost_equal(uniform.mass(),
            density * height * (4 * rad * thick + pi * rad**2))
    # Diff r, same t.
    assert_flip_invariant(2, 3, 2, 2)
    # Same r, diff t.
    assert_flip_invariant(2, 3, 1, 3)
    # Diff r, diff t, one is included in the other.
    assert_flip_invariant(2, 3, 1, 2)
    # Diff r, diff t, overlap.
    assert_flip_invariant(1, 3, 5, 2)
def test_degenerate_stadiumsolid_symmetry():
    """Tests the validity, and symmetry, of the stadiumsolid formulae, as
    implemented with the t0 == 0 correction. That means, if we switch which
    stadium we call 0 or 1, we look at if the mass/volume, center of mass,
    moments of inertia change.
    """
    density = 1.5
    height = 4
    height_vec = array([[0], [0], [height]])
    # One thickness is 0.
    r0 = 5; t0 = 0; r1 = 2; t1 = 2;
    # For checking against.
    # stad1_des approximates the degenerate stadium with a tiny nonzero
    # thickness (no correction needed), while stad1 uses exactly 0 and
    # presumably exercises the implementation's t0 == 0 correction path --
    # confirm in yeadon.solid.
    stad1_des = Stadium('Ls1: umbilicus', 'thicknessradius', 0.000000001, r0)
    stad1 = Stadium('Ls1: umbilicus', 'thicknessradius', t0, r0)
    stad2 = Stadium('Lb1: mid-arm', 'thicknessradius', t1, r1)
    solidA = StadiumSolid('solid', density, stad1_des, stad2, height)
    solidA_des= StadiumSolid('solid', density, stad1, stad2, height)
    solidB = StadiumSolid('solid', density, stad2, stad1_des, height)
    solidB_des = StadiumSolid('solid', density, stad2, stad1, height)
    # Corrected (exact t = 0) and approximated (tiny t) solids must agree.
    testing.assert_almost_equal(solidB.mass, solidB_des.mass, decimal=4)
    testing.assert_allclose(solidB.rel_center_of_mass,
            solidB_des.rel_center_of_mass)
    testing.assert_almost_equal(solidB.rel_inertia[0,0],
            solidB_des.rel_inertia[0,0], decimal=4)
    testing.assert_almost_equal(solidB.rel_inertia[1,1],
            solidB_des.rel_inertia[1,1], decimal=4)
    testing.assert_almost_equal(solidB.rel_inertia[2,2],
            solidB_des.rel_inertia[2,2], decimal=4)
    testing.assert_almost_equal(solidA.mass, solidA_des.mass)
    testing.assert_allclose(solidA.rel_center_of_mass,
            solidA_des.rel_center_of_mass)
    testing.assert_almost_equal(solidA.rel_inertia[0,0],
            solidA_des.rel_inertia[0,0], decimal=4)
    testing.assert_almost_equal(solidA.rel_inertia[1,1],
            solidA_des.rel_inertia[1,1], decimal=4)
    testing.assert_almost_equal(solidA.rel_inertia[2,2],
            solidA_des.rel_inertia[2,2], decimal=4)
    # Flip symmetry: the center of mass mirrors about mid-height; the other
    # properties are unchanged.
    testing.assert_almost_equal(solidA.mass, solidB.mass)
    testing.assert_allclose(solidA.rel_center_of_mass,
            height_vec - solidB.rel_center_of_mass)
    testing.assert_almost_equal(solidA.rel_inertia[0,0], solidB.rel_inertia[0,0])
    testing.assert_almost_equal(solidA.rel_inertia[1,1], solidB.rel_inertia[1,1])
    testing.assert_almost_equal(solidA.rel_inertia[2,2], solidB.rel_inertia[2,2])
    # Both thicknesses are zero.
    r0 = 3; t0 = 0; r1 = 2; t1 = 0;
    stad1 = Stadium('Ls1: umbilicus', 'thicknessradius', t0, r0)
    stad2 = Stadium('Lb1: mid-arm', 'thicknessradius', t1, r1)
    solidA = StadiumSolid('solid', density, stad1, stad2, height)
    solidB = StadiumSolid('solid', density, stad2, stad1, height)
    testing.assert_almost_equal(solidA.mass, solidB.mass)
    testing.assert_allclose(solidA.rel_center_of_mass,
            height_vec - solidB.rel_center_of_mass)
    testing.assert_almost_equal(solidA.rel_inertia[0,0], solidB.rel_inertia[0,0])
    testing.assert_almost_equal(solidA.rel_inertia[1,1], solidB.rel_inertia[1,1])
    testing.assert_almost_equal(solidA.rel_inertia[2,2], solidB.rel_inertia[2,2])
    # A third case for when both t0 and t1 are zero.
    # TODO
def test_stadiumsolidcheck_against_truncated_cone():
    """Tests the StadiumSolidCheck formulae above against truncated cone
    formulae for degenerate stadia; using a thin trapezium."""
    def cone_frustum_mass(rho, r_bot, r_top, h):
        # Standard truncated-cone volume, times the density.
        return rho / 3.0 * pi * h * (r_bot**2 + r_top**2 + r_bot * r_top)

    rho = 1.5
    h = 4
    r_bot = 3
    eps = 0.0000001  # nearly-zero thickness degenerates a stadium to a circle
    r_top = 4
    # Both levels circular: the solid is (almost) a cone frustum.
    degenerate = StadiumSolidCheck(rho, eps, r_bot, eps, r_top, h)
    testing.assert_almost_equal(degenerate.mass(),
            cone_frustum_mass(rho, r_bot, r_top, h), decimal=4)
    # Now only one level is a circle, so we add an extruded triangle in the
    # middle to find the extra volume on our own (to check). Radii must be
    # the same for this to work out in the simple case.
    t_bot = 1
    r_top = 3
    half_stadium = StadiumSolidCheck(rho, t_bot, r_bot, eps, r_top, h)
    testing.assert_almost_equal(half_stadium.mass(),
            cone_frustum_mass(rho, r_bot, r_top, h) +
            rho * (2 * t_bot * h * 0.5 * (r_bot * 2)), decimal=4)
def test_rotate_inertia():
    """Are we obtaining the global inertia properly?

    Overrides a solid's relative inertia and rotation matrix by hand, then
    checks that calc_properties() expresses the inertia in the rotated
    frame. (The unused 'height_vec' local from the original was removed.)
    """
    density = 1.5
    height = 4
    # One thickness is 0.
    r0 = 5; t0 = 0; r1 = 2; t1 = 2;
    stad1 = Stadium('Ls1: umbilicus', 'thicknessradius', t0, r0)
    stad2 = Stadium('Lb1: mid-arm', 'thicknessradius', t1, r1)
    solidA = StadiumSolid('solid', density, stad1, stad2, height)
    # This inertia matrix describes two 1kg point masses at (0, 2, 1) and
    # (0, -2, -1) in the global reference frame, A.
    solidA._rel_inertia = mat([[10.0, 0.0, 0.0],
                               [0.0, 2.0, -4.0],
                               [0.0, -4.0, 8.0]])
    # If we want the inertia about a new reference frame, B, such that the
    # two masses lie on the yb axis we can rotate about xa through the angle
    # arctan(1/2). Note that this function returns R from va = R * vb.
    solidA._rot_mat = inertia.rotate_space_123((arctan(1.0 / 2.0), 0.0, 0.0))
    solidA.calc_properties()
    I_b = solidA.inertia
    expected_I_b = mat([[10.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0],
                        [0.0, 0.0, 10.0]])
    testing.assert_allclose(I_b, expected_I_b, atol=1e-16)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Main 'zirkon' tool program.
"""
import argparse
import logging
import os
import sys
from .errors import FormatError
from .filetype import (
get_fmts, get_config_classes,
get_config_class_name,
get_default_fmt, parse_filetype,
)
from .discover import discover, search_filetype
from .config import Config
from .program_config import ProgramConfig
from .schema import Schema
from .toolbox.loader import load_module
from .toolbox.tool_utils import (
IoManager,
die,
tabulate_filetypes,
trace_errors,
)
from .utils import (
get_key, set_key, del_key,
create_template_from_schema, sort_section,
)
from .validation import Validation
from .version import VERSION
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = [
'main',
]
def list_files(params, *, config_dirs, schema_dirs):
    """Lists discovered config and schema files.

    Parameters
    ----------
    params: dict
        common params
    config_dirs: tuple
        list of config directories
    schema_dirs: tuple
        list of schema directories
    """
    printer = params["printer"]
    show_header = params.get("header", True)
    # Pair each directory with the config class(es) expected in it.
    search_paths = [(directory, (Config,)) for directory in config_dirs]
    search_paths += [(directory, (Schema,)) for directory in schema_dirs]
    filetypes = discover(*search_paths, standard_paths=True)
    for row in tabulate_filetypes(filetypes, header=show_header):
        printer(row)
def _copy_keys(source, target, keys):
    """Copy the listed keys from source into target (parents=True so
    intermediate sections are created as needed)."""
    for key_path in keys:
        value = get_key(source, key_path)
        set_key(target, key_path, value, parents=True)
def _delete_keys(section, keys):
    """Remove every key listed in keys from section."""
    for key_path in keys:
        del_key(section, key_path)
def _select_discard_keys(config, select_keys, discard_keys):
    """Return a config filtered by key selection/discard lists.

    If select_keys is non-empty, return a fresh config holding only those
    keys; otherwise, if discard_keys is non-empty, return a copy of config
    without them; otherwise return config unchanged.
    """
    if select_keys:
        selected = config.__class__()
        _copy_keys(config, selected, select_keys)
        return selected
    if discard_keys:
        trimmed = config.copy()
        _delete_keys(trimmed, discard_keys)
        return trimmed
    return config
def read_config(params, *,
                defaults, evaluate, sort_keys,
                input_filetypes, schema_filetypes,
                output_filetype, validation_filetype, force,
                select_keys, discard_keys):
    # pylint: disable=too-many-locals
    """Reads one or more config files, optionally validates them against a
    schema, filters/sorts the result and dumps or writes it.
    Parameters
    ----------
    params: dict
        common parameters
    defaults: bool
        enable defaults
    evaluate: bool
        evaluate macros
    sort_keys: bool
        sort section keys
    input_filetypes: list
        list of input filetypes
    schema_filetypes: list
        list of schema filetypes
    output_filetype: FileType
        output filetype
    validation_filetype: FileType
        validation filetype
    force: bool
        force overwriting output files
    select_keys: list
        list of keys to be selected
    discard_keys: list
        list of keys to be discarded
    """
    printer = params["printer"]
    logger = params["logger"]
    io_manager = IoManager(printer=printer, logger=logger)
    with trace_errors(params["debug"]):
        # Load and merge all schemas first; they are used for validation.
        schema = None
        if schema_filetypes:
            schema = Schema()
            for schema_filetype in schema_filetypes:
                io_manager.read_obj(schema, schema_filetype)
        # The first input filetype decides the default output format and the
        # config class (assumes at least one input filetype -- presumably
        # guaranteed by the argument parser; TODO confirm).
        default_output_fmt = input_filetypes[0].fmt
        config_class = input_filetypes[0].config_class
        config_args = {}
        if issubclass(config_class, Config):
            # NOTE(review): 'defaults' is negated here (and in create_config);
            # the CLI flag presumably carries the inverse meaning -- confirm.
            config_args['defaults'] = not defaults
        config = config_class(**config_args)
        # Merge every input file into a single config object.
        input_filenames = []
        for input_filetype in input_filetypes:
            input_filenames.append(input_filetype.filepath)
            io_manager.read_obj(config, input_filetype)
        if schema is not None:
            validation = schema.validate(config)
            if validation_filetype is not None:
                io_manager.write_obj(validation, validation_filetype, overwrite=force)
            # A truthy validation result appears to mean "errors found"
            # (see the warning below) -- confirm in Schema.validate.
            if validation and validation_filetype is None:
                logger.warning("validation failed for config %s:", ", ".join(input_filenames))
                io_manager.dump_obj(validation, print_function=logger.warning)
        if evaluate:
            # Force macro evaluation by round-tripping through a plain dict.
            config = config.__class__(config.as_dict(evaluate=True))
        config = _select_discard_keys(config, select_keys, discard_keys)
        if sort_keys:
            sort_section(config)
        if output_filetype is None:
            io_manager.dump_obj(config, fmt=default_output_fmt)
        else:
            io_manager.write_obj(config, output_filetype, overwrite=force)
def create_config(params, *,
                  defaults, evaluate, sort_keys,
                  schema_filetypes, output_filetype, force,
                  select_keys, discard_keys):
    """Creates a config file from a schema.
    Parameters
    ----------
    params: dict
        common parameters
    defaults: bool
        enable defaults
    evaluate: bool
        evaluate macros
    sort_keys: bool
        sort section keys
    schema_filetypes: list
        list of schema filetypes
    output_filetype: FileType
        output filetype
    force: bool
        force overwriting output files
    select_keys: list
        list of keys to be selected
    discard_keys: list
        list of keys to be discarded
    """
    printer = params["printer"]
    logger = params["logger"]
    default_output_fmt = params["default_fmt"]
    io_manager = IoManager(printer=printer, logger=logger)
    # NOTE(review): unlike read_config/program, this function does not wrap
    # its work in trace_errors(params["debug"]) -- confirm whether that is
    # intentional.
    schema = Schema()
    for schema_filetype in schema_filetypes:
        io_manager.read_obj(schema, schema_filetype)
    # NOTE(review): 'defaults' is negated, as in read_config -- the CLI flag
    # presumably carries the inverse meaning; confirm.
    config = Config(defaults=not defaults)
    create_template_from_schema(schema=schema, config=config)
    if evaluate:
        # Force macro evaluation by round-tripping through a plain dict.
        config = config.__class__(config.as_dict(evaluate=True))
    config = _select_discard_keys(config, select_keys, discard_keys)
    if sort_keys:
        sort_section(config)
    if output_filetype is None:
        io_manager.dump_obj(config, fmt=default_output_fmt)
    else:
        io_manager.write_obj(config, output_filetype, overwrite=force)
def program(params, *,
            module_names, pmode, args,
            defaults, evaluate, sort_keys,
            output_filetype, validation_filetype, force,
            select_keys, discard_keys):
    # pylint: disable=too-many-locals
    """Loads the configured modules and dumps/writes their program config
    (pmode == "config") or schema (pmode == "schema").

    Parameters
    ----------
    params: dict
        common parameters
    pmode: str
        "config" or "schema"
    module_names: list
        list of configured module names
    args: list
        list of positional args
    defaults: bool
        enable defaults
    evaluate: bool
        evaluate macros
    sort_keys: bool
        sort section keys
    output_filetype: FileType
        output filetype
    validation_filetype: FileType
        validation filetype
    force: bool
        force overwriting output files
    select_keys: list
        list of keys to be selected
    discard_keys: list
        list of keys to be discarded

    Raises
    ------
    ValueError
        if pmode is neither "config" nor "schema"
    """
    printer = params["printer"]
    logger = params["logger"]
    io_manager = IoManager(printer=printer, logger=logger)
    default_output_fmt = params["default_fmt"]
    with trace_errors(params["debug"]):
        # Import every configured module; together they define the program
        # configuration.
        modules = []
        for module_name in module_names:
            modules.append(load_module(module_name))
        program_config = ProgramConfig(*modules)
        if pmode == "config":
            program_info = program_config.parse_args(args, raise_on_error=False)
            config = program_info.config
            validation = program_info.validation
            if validation_filetype is not None:
                io_manager.write_obj(validation, validation_filetype, overwrite=force)
            if validation and validation_filetype is None:
                logger.warning("validation failed:")
                io_manager.dump_obj(validation, print_function=logger.warning)
            if validation:
                # Do not dump/write a config that failed validation.
                return
            config = Config(config.as_dict(defaults=defaults, evaluate=evaluate))
        elif pmode == "schema":
            config = program_config.schema  # pylint: disable=redefined-variable-type
        else:
            # Was: an unexpected pmode fell through and crashed later with a
            # confusing NameError on 'config'; fail fast instead.
            raise ValueError("invalid pmode {!r} (expected 'config' or 'schema')".format(pmode))
        config = _select_discard_keys(config, select_keys, discard_keys)
        if sort_keys:
            sort_section(config)
        if output_filetype is None:
            io_manager.dump_obj(config, fmt=default_output_fmt)
        else:
            io_manager.write_obj(config, output_filetype, overwrite=force)
def _create_logger(stream, verbose_level):
"""Creates a logger.
Parameters
----------
stream: file
the logger's stream
verbose_level: int
the initial verbose level
Returns
-------
Logger
the logger
"""
logger = logging.getLogger("ZIRKON-LOG")
if verbose_level == 0:
log_level = logging.ERROR
elif verbose_level == 1:
log_level = logging.WARNING
elif verbose_level == 2:
log_level = logging.INFO
elif verbose_level >= 3:
log_level = logging.DEBUG
log_handler = logging.StreamHandler(stream=stream)
log_formatter = logging.Formatter("{levelname:8s} {message}", style="{")
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
logger.setLevel(log_level)
return logger
def _filetype(logger, filearg, config_class=None, fmt=None):
    """Converts a string from command line to filetype, dying with a clear
    message on an unknown format."""
    try:
        return parse_filetype(filename=filearg, config_class=config_class, fmt=fmt)
    except FormatError as err:
        available = "|".join(get_fmts())
        die(logger, "invalid format {} (available formats: {})".format(err.args[0], available))
def _input_filetype(logger, filearg, config_class=None):
    """Converts a string from command line to an input filetype.
    Parameters
    ----------
    logger: Logger
        the logger
    filearg: str
        the command line string
    config_class: ConfigBase, optional
        the expected config class (defaults to None)
    Returns
    -------
    FileType
        the FileType object
    """
    if filearg is None:  # pragma: no cover
        return None
    filetype = _filetype(logger, filearg, config_class=config_class)
    # Look the file up in the standard search directories.
    found_filetypes = tuple(search_filetype(filetype))
    if len(found_filetypes) == 0:
        # Not found via search: accept it only if the literal path exists
        # on disk (die() presumably aborts the process -- confirm).
        if not os.path.exists(filetype.filepath):
            die(logger, "invalid value {}: input file not found".format(
                filearg))
    elif len(found_filetypes) > 1:  # pragma: no cover
        # Ambiguous: report every match before bailing out.
        logger.warning("{!r}: multiple matches: found {} matches:".format(filearg, len(found_filetypes)))
        for line in tabulate_filetypes(found_filetypes):
            logger.warning(" * {}".format(line))
        die(logger, "invalid value {!r}: multiple matches".format(
            filearg))
    # Both the config class and the format must be known by now.
    undetected_attributes = []
    for attribute in 'config_class', 'fmt':
        if getattr(filetype, attribute) is None:
            undetected_attributes.append(attribute)
    if undetected_attributes:
        die(logger, "invalid value {}: cannot detect {}".format(
            filearg, ', '.join(undetected_attributes)))
    return filetype
def _input_schema_filetype(logger, filearg):
    """Parse a command-line string into an input schema FileType.

    Parameters
    ----------
    logger: Logger
        the logger
    filearg: str
        the command line string

    Returns
    -------
    FileType
        the FileType object
    """
    # Same as a generic input filetype, but pinned to the Schema class.
    filetype = _input_filetype(logger, filearg, config_class=Schema)
    return filetype
def _output_filetype(logger, filearg):
    """Parse a command-line string into an output FileType.

    Parameters
    ----------
    logger: Logger
        the logger
    filearg: str
        the command line string

    Returns
    -------
    FileType
        the FileType object
    """
    # No config class or format constraint for generic output files.
    return _filetype(logger, filearg, config_class=None, fmt=None)
def _validation_filetype(logger, filearg):
    """Parse a command-line string into an output validation FileType.

    Parameters
    ----------
    logger: Logger
        the logger
    filearg: str
        the command line string

    Returns
    -------
    FileType
        the FileType object
    """
    # Validation output files are always bound to the Validation class.
    filetype = _filetype(logger, filearg, config_class=Validation)
    return filetype
def _validate_args(logger, args):
    """Validates args, completing output/validation filetypes in place.
    Parameters
    ----------
    logger: Logger
        the logger
    args: argparse.Namespace
        parsed command line arguments; args.output_filetype and
        args.validation_filetype may be replaced with completed copies
    """
    input_filetypes = getattr(args, "input_filetypes", None)
    schema_filetypes = getattr(args, "schema_filetypes", None)
    output_filetype = getattr(args, "output_filetype", None)
    validation_filetype = getattr(args, "validation_filetype", None)
    # Derive a reference format/config class from the first input filetype,
    # falling back to the first schema filetype for the format only.
    ref_fmt = None
    ref_config_class = None
    if input_filetypes:
        ref_fmt = input_filetypes[0].fmt
        ref_config_class = input_filetypes[0].config_class
    elif schema_filetypes:
        ref_fmt = schema_filetypes[0].fmt
    if output_filetype is not None:
        # Fill in whatever the output filetype is missing; _replace suggests
        # FileType is a namedtuple -- confirm in .filetype.
        replace_d = {}
        if output_filetype.fmt is None and ref_fmt is not None:
            replace_d['fmt'] = ref_fmt
        if ref_config_class is not None:
            if output_filetype.config_class is None:
                replace_d['config_class'] = ref_config_class
            elif output_filetype.config_class != ref_config_class:
                # Mismatch: warn, then force the reference class.
                logger.warning("output filename {}: config_class mismatch: {} or {}?".format(
                    output_filetype.filepath,
                    ref_config_class.__name__,
                    output_filetype.config_class.__name__))
                replace_d['config_class'] = ref_config_class
        if replace_d:
            args.output_filetype = output_filetype._replace(**replace_d)
    if validation_filetype is not None:
        if validation_filetype.fmt is None and ref_fmt is not None:
            args.validation_filetype = validation_filetype._replace(fmt=ref_fmt)
def main_parse_args(log_stream=sys.stderr, out_stream=sys.stdout, argv=None):
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """Parses command line arguments.

    Parameters
    ----------
    log_stream: file, optional
        the log file (defaults to sys.stderr)
    out_stream: file, optional
        the output file (defaults to sys.stdout)
    argv: list, optional
        the command line arguments (defaults to None, meaning sys.args[1:])

    Returns
    -------
    tuple
        (top_level_parser, args, logger, printer)
    """
    if argv is None:  # pragma: no cover
        argv = sys.argv[1:]
    default_verbose_level = 1
    config_class_names = [get_config_class_name(config_class) for config_class in get_config_classes()]
    parser_args = dict(
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Parent parser shared by the top-level parser and every subcommand.
    common_parser = argparse.ArgumentParser(
        add_help=False,
        **parser_args)
    top_level_description = """\
Zirkon tool - read/write/validate config files.
The FILE values can be specified with the following syntax:
filepath[:fmt[:config_class]]
where fmt can be any of the available formats:
{fmts}
and config_class any of the available classes:
{config_classes}
Environment variables
---------------------
* ZIRKON_CONFIG_PATH colon-separated list of directories for config files search
* ZIRKON_SCHEMA_PATH colon-separated list of directories for schema files search
""".format(fmts=', '.join(get_fmts()),
           config_classes=', '.join(config_class_names))
    top_level_parser = argparse.ArgumentParser(
        "zirkon",
        parents=(common_parser,),
        description=top_level_description,
        **parser_args)
    # Global options: verbosity, debug, version.
    top_level_parser.add_argument(
        "--verbose", "-v",
        dest="verbose_level",
        action="count",
        default=default_verbose_level,
        help="increase verbosity")
    top_level_parser.add_argument(
        "--quiet", "-q",
        dest="verbose_level",
        action="store_const",
        const=0,
        default=default_verbose_level,
        help="quiet mode (only errors are shown)")
    top_level_parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="debug mode")
    top_level_parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s {}".format(VERSION),
        help="show version")
    subparsers = top_level_parser.add_subparsers()
    # Subcommand "list": show available config/schema files.
    list_parser = subparsers.add_parser(
        "list",
        parents=(common_parser,),
        description="""Lists all the available config and schema files.""",
        **parser_args)
    list_parser.set_defaults(
        function=list_files,
        function_args=("config_dirs", "schema_dirs"))
    # Subcommand "read": read (and optionally validate) a config file.
    read_parser = subparsers.add_parser(
        "read",
        parents=(common_parser,),
        description="""Reads a config file.""",
        **parser_args)
    read_parser.set_defaults(
        function=read_config,
        function_args=("defaults", "evaluate", "sort_keys",
                       "input_filetypes", "schema_filetypes",
                       "output_filetype", "validation_filetype",
                       "force", "select_keys", "discard_keys"))
    # Subcommand "create": generate a config file from a schema.
    create_parser = subparsers.add_parser(
        "create",
        parents=(common_parser,),
        description="""Creates a config file from a schema.""",
        **parser_args)
    create_parser.set_defaults(
        function=create_config,
        function_args=("defaults", "evaluate", "sort_keys",
                       "schema_filetypes", "output_filetype", "force",
                       "select_keys", "discard_keys"))
    # Subcommand "program": show configuration of configured modules.
    program_parser = subparsers.add_parser(
        "program",
        parents=(common_parser,),
        description="""Loads and show program configuration.""",
        **parser_args)
    program_parser.set_defaults(
        function=program,
        function_args=("module_names", "pmode", "args", "defaults", "evaluate",
                       "sort_keys", "output_filetype", "force", "validation_filetype",
                       "select_keys", "discard_keys"))
    default_pmode = "schema"
    program_parser.add_argument(
        "args",
        nargs="*",
        help="arguments to be passed to the program_config object")
    program_parser.add_argument(
        "--module", "-m",
        metavar="MOD",
        dest="module_names",
        default=[],
        action="append",
        required=True,
        type=str,
        help="load a configured module (can be repeated)")
    # --config and --schema are mutually exclusive views of the program.
    program_group = program_parser.add_mutually_exclusive_group()
    program_group.add_argument(
        "--config", "-c",
        dest="pmode",
        default=default_pmode,
        action="store_const",
        const="config",
        help="show program config")
    program_group.add_argument(
        "--schema", "-s",
        dest="pmode",
        default=default_pmode,
        action="store_const",
        const="schema",
        help="show program schema")
    for parser in (read_parser,):
        parser.add_argument(
            "--input", "-i",
            dest="input_filetypes",
            metavar="IC",
            default=[],
            required=True,
            type=str,
            action="append",
            help="input file")
    for parser in read_parser, create_parser, program_parser:
        parser.add_argument(
            "--sort-keys", "-S",
            dest="sort_keys",
            default=False,
            action="store_true",
            help="sort keys")
    # --schema is mandatory only for "create".
    schema_required = {}
    schema_required[read_parser] = False
    schema_required[create_parser] = True
    # Options shared by read/create/program.
    for parser in read_parser, create_parser, program_parser:
        parser.add_argument(
            "--defaults", "-d",
            action="store_true",
            default=False,
            help="show default values")
        parser.add_argument(
            "--evaluate", "-e",
            action="store_true",
            default=False,
            help="evaluate macros")
        parser.add_argument(
            "--output", "-o",
            dest="output_filetype",
            metavar="OC",
            default=None,
            type=str,
            help="output file")
        parser.add_argument(
            "--force", "-f",
            action="store_true",
            default=False,
            help="force overwriting existing output files")
        select_key_group = parser.add_mutually_exclusive_group()
        select_key_group.add_argument(
            "--select-key", "-k",
            dest="select_keys",
            metavar="KEY",
            action='append',
            default=[],
            help="select config key to be shown (can be repeated); for instance, '-k /sub/x -k sub.opts.value'")
        select_key_group.add_argument(
            "--discard-key", "-K",
            dest="discard_keys",
            metavar="KEY",
            action='append',
            default=[],
            help="discard config key to be shown (can be repeated); for instance, '-k /sub/x -k sub.opts.value'")
    for parser in read_parser, create_parser:
        parser.add_argument(
            "--schema", "-s",
            dest="schema_filetypes",
            metavar="FILE",
            default=[],
            required=schema_required[parser],
            type=str,
            action="append",
            help="schema input file")
    for parser in (read_parser, program_parser):
        parser.add_argument(
            "--validation", "-V",
            dest="validation_filetype",
            metavar="FILE",
            default=None,
            type=str,
            help="validation output file")
    for parser in (list_parser,):
        parser.add_argument(
            "--config-dir", "-cd",
            dest="config_dirs",
            metavar="CD",
            action="append",
            default=[],
            type=str,
            help="add config dir")
        parser.add_argument(
            "--schema-dir", "-sd",
            dest="schema_dirs",
            metavar="SD",
            action="append",
            default=[],
            type=str,
            help="add config dir")
    args = top_level_parser.parse_args(argv)
    logger = _create_logger(log_stream, args.verbose_level)
    printer = lambda x: print(x, file=out_stream, flush=True)  # flake8: noqa
    # Convert the raw command-line strings to FileType objects, in place:
    # list-valued options first, then scalar ones.
    list_converter_d = {
        "input_filetypes": _input_filetype,
        "schema_filetypes": _input_schema_filetype,
    }
    for key, converter in list_converter_d.items():
        if hasattr(args, key):
            lst = [converter(logger=logger, filearg=value) for value in getattr(args, key)]
            setattr(args, key, lst)
    converter_d = {
        "output_filetype": _output_filetype,
        "validation_filetype": _validation_filetype,
    }
    for key, converter in converter_d.items():
        if hasattr(args, key):
            setattr(args, key, converter(logger=logger, filearg=getattr(args, key)))
    # Fill in missing fmt/config_class values from the input/schema files.
    _validate_args(logger, args)
    for key, _ in converter_d.items():
        if hasattr(args, key):
            logger.debug("%20s: %s", key, getattr(args, key))
    return top_level_parser, args, logger, printer
def main(log_stream=sys.stderr, out_stream=sys.stdout, argv=None):
    """Runs the main program.

    Parameters
    ----------
    log_stream: file, optional
        the log file (defaults to sys.stderr)
    out_stream: file, optional
        the output file (defaults to sys.stdout)
    argv: list, optional
        the command line arguments (defaults to None, meaning sys.args[1:])
    """
    parser, args, logger, printer = main_parse_args(
        log_stream=log_stream, out_stream=out_stream, argv=argv)
    function = getattr(args, "function", None)
    if not function:
        # No subcommand selected: show usage and exit with an error code.
        parser.print_help()
        sys.exit(1)
    # Collect the per-subcommand keyword arguments declared via set_defaults.
    function_args = {name: getattr(args, name) for name in args.function_args}
    params = {
        "printer": printer,
        "logger": logger,
        "default_fmt": get_default_fmt(),
    }
    for key in ("debug",):
        params[key] = getattr(args, key)
    return function(params=params, **function_args)
# Script entry point.
if __name__ == "__main__":
    main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""ExtensionManager
"""
import logging
import operator
from . import _cache
from .exception import NoMatches
LOG = logging.getLogger(__name__)
class Extension(object):
    """Book-keeping object for tracking extensions.

    The arguments passed to the constructor are saved as attributes of
    the instance using the same names, and can be accessed by the
    callables passed to :meth:`map` or when iterating over an
    :class:`ExtensionManager` directly.

    :param name: The entry point name.
    :type name: str
    :param entry_point: The EntryPoint instance returned by
        :mod:`entrypoints`.
    :type entry_point: EntryPoint
    :param plugin: The value returned by entry_point.load()
    :param obj: The object returned by ``plugin(*args, **kwds)`` if the
        manager invoked the extension on load.
    """

    def __init__(self, name, entry_point, plugin, obj):
        self.name = name
        self.entry_point = entry_point
        self.plugin = plugin
        self.obj = obj

    @property
    def module_name(self):
        """The name of the module from which the entry point is loaded.

        :return: A string in 'dotted.module' format.
        """
        # importlib_metadata from PyPI exposes this directly, but the
        # Python 3.8 standard library does not, so parse the entry point
        # value with its own pattern.
        parsed = self.entry_point.pattern.match(self.entry_point.value)
        return parsed.group('module')

    @property
    def extras(self):
        """The 'extras' settings for the plugin."""
        # The underlying package returns match objects for some reason
        # (_sre.SRE_Match on Python 3.6, re.Match later); both carry the
        # matched text in their 'string' attribute, which is what callers
        # actually want. Plain strings pass through unchanged.
        return [getattr(extra, 'string', extra)
                for extra in self.entry_point.extras]

    @property
    def attr(self):
        """The attribute of the module to be loaded."""
        parsed = self.entry_point.pattern.match(self.entry_point.value)
        return parsed.group('attr')

    @property
    def entry_point_target(self):
        """The module and attribute referenced by this extension's entry_point.

        :return: A string representation of the target of the entry point in
            'dotted.module:object' format.
        """
        return self.entry_point.value
class ExtensionManager(object):
    """Base class for all of the other managers.

    :param namespace: The namespace for the entry points.
    :type namespace: str
    :param invoke_on_load: Boolean controlling whether to invoke the
        object returned by the entry point after the driver is loaded.
    :type invoke_on_load: bool
    :param invoke_args: Positional arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_args: tuple
    :param invoke_kwds: Named arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_kwds: dict
    :param propagate_map_exceptions: Boolean controlling whether exceptions
        are propagated up through the map call or whether they are logged and
        then ignored
    :type propagate_map_exceptions: bool
    :param on_load_failure_callback: Callback function that will be called when
        an entrypoint can not be loaded. The arguments that will be provided
        when this is called (when an entrypoint fails to load) are
        (manager, entrypoint, exception)
    :type on_load_failure_callback: function
    :param verify_requirements: Use setuptools to enforce the
        dependencies of the plugin(s) being loaded. Defaults to False.
    :type verify_requirements: bool
    """

    # NOTE(review): invoke_kwds={} is a mutable default; it is only ever
    # read (unpacked into the plugin call), so sharing is harmless here.
    def __init__(self, namespace,
                 invoke_on_load=False,
                 invoke_args=(),
                 invoke_kwds={},
                 propagate_map_exceptions=False,
                 on_load_failure_callback=None,
                 verify_requirements=False):
        self._init_attributes(
            namespace,
            propagate_map_exceptions=propagate_map_exceptions,
            on_load_failure_callback=on_load_failure_callback)
        extensions = self._load_plugins(invoke_on_load,
                                        invoke_args,
                                        invoke_kwds,
                                        verify_requirements)
        self._init_plugins(extensions)

    @classmethod
    def make_test_instance(cls, extensions, namespace='TESTING',
                           propagate_map_exceptions=False,
                           on_load_failure_callback=None,
                           verify_requirements=False):
        """Construct a test ExtensionManager

        Test instances are passed a list of extensions to work from rather
        than loading them from entry points.

        :param extensions: Pre-configured Extension instances to use
        :type extensions: list of :class:`~stevedore.extension.Extension`
        :param namespace: The namespace for the manager; used only for
            identification since the extensions are passed in.
        :type namespace: str
        :param propagate_map_exceptions: When calling map, controls whether
            exceptions are propagated up through the map call or whether they
            are logged and then ignored
        :type propagate_map_exceptions: bool
        :param on_load_failure_callback: Callback function that will
            be called when an entrypoint can not be loaded. The
            arguments that will be provided when this is called (when
            an entrypoint fails to load) are (manager, entrypoint,
            exception)
        :type on_load_failure_callback: function
        :param verify_requirements: Use setuptools to enforce the
            dependencies of the plugin(s) being loaded. Defaults to False.
        :type verify_requirements: bool
        :return: The manager instance, initialized for testing
        """
        # Bypass __init__ so no entry points are scanned or loaded.
        o = cls.__new__(cls)
        o._init_attributes(namespace,
                           propagate_map_exceptions=propagate_map_exceptions,
                           on_load_failure_callback=on_load_failure_callback)
        o._init_plugins(extensions)
        return o

    def _init_attributes(self, namespace, propagate_map_exceptions=False,
                         on_load_failure_callback=None):
        # Shared setup used by both __init__ and make_test_instance.
        self.namespace = namespace
        self.propagate_map_exceptions = propagate_map_exceptions
        self._on_load_failure_callback = on_load_failure_callback

    def _init_plugins(self, extensions):
        self.extensions = extensions
        # Name index is built lazily by _extensions_by_name.
        self._extensions_by_name_cache = None

    @property
    def _extensions_by_name(self):
        # Build and memoize the name -> Extension mapping on first access.
        if self._extensions_by_name_cache is None:
            d = {}
            for e in self.extensions:
                d[e.name] = e
            self._extensions_by_name_cache = d
        return self._extensions_by_name_cache

    # Class-level cache mapping namespace -> list of entry points; shared
    # by every ExtensionManager instance in the process.
    ENTRY_POINT_CACHE = {}

    def list_entry_points(self):
        """Return the list of entry points for this namespace.

        The entry points are not actually loaded, their list is just read and
        returned.
        """
        if self.namespace not in self.ENTRY_POINT_CACHE:
            eps = list(_cache.get_group_all(self.namespace))
            self.ENTRY_POINT_CACHE[self.namespace] = eps
        return self.ENTRY_POINT_CACHE[self.namespace]

    def entry_points_names(self):
        """Return the list of entry points names for this namespace."""
        return list(map(operator.attrgetter("name"), self.list_entry_points()))

    def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds,
                      verify_requirements):
        # Load every entry point in the namespace, collecting the resulting
        # Extension objects. A failure to load one plugin does not abort
        # the others.
        extensions = []
        for ep in self.list_entry_points():
            LOG.debug('found extension %r', ep)
            try:
                ext = self._load_one_plugin(ep,
                                            invoke_on_load,
                                            invoke_args,
                                            invoke_kwds,
                                            verify_requirements,
                                            )
                if ext:
                    extensions.append(ext)
            except (KeyboardInterrupt, AssertionError):
                raise
            except Exception as err:
                if self._on_load_failure_callback is not None:
                    self._on_load_failure_callback(self, ep, err)
                else:
                    # Log the reason we couldn't import the module,
                    # usually without a traceback. The most common
                    # reason is an ImportError due to a missing
                    # dependency, and the error message should be
                    # enough to debug that. If debug logging is
                    # enabled for our logger, provide the full
                    # traceback.
                    LOG.error('Could not load %r: %s', ep.name, err,
                              exc_info=LOG.isEnabledFor(logging.DEBUG))
        return extensions

    def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds,
                         verify_requirements):
        # NOTE(dhellmann): Using require=False is deprecated in
        # setuptools 11.3.
        if hasattr(ep, 'resolve') and hasattr(ep, 'require'):
            # pkg_resources-style entry point.
            if verify_requirements:
                ep.require()
            plugin = ep.resolve()
        else:
            # importlib.metadata-style entry point.
            plugin = ep.load()
        if invoke_on_load:
            obj = plugin(*invoke_args, **invoke_kwds)
        else:
            obj = None
        return Extension(ep.name, ep, plugin, obj)

    def names(self):
        "Returns the names of the discovered extensions"
        # We want to return the names of the extensions in the order
        # they would be used by map(), since some subclasses change
        # that order.
        return [e.name for e in self.extensions]

    def map(self, func, *args, **kwds):
        """Iterate over the extensions invoking func() for each.

        The signature for func() should be::

            def func(ext, *args, **kwds):
                pass

        The first argument to func(), 'ext', is the
        :class:`~stevedore.extension.Extension` instance.

        Exceptions raised from within func() are propagated up and
        processing stopped if self.propagate_map_exceptions is True,
        otherwise they are logged and ignored.

        :param func: Callable to invoke for each extension.
        :param args: Variable arguments to pass to func()
        :param kwds: Keyword arguments to pass to func()
        :returns: List of values returned from func()
        """
        if not self.extensions:
            # FIXME: Use a more specific exception class here.
            raise NoMatches('No %s extensions found' % self.namespace)
        response = []
        for e in self.extensions:
            self._invoke_one_plugin(response.append, func, e, args, kwds)
        return response

    @staticmethod
    def _call_extension_method(extension, method_name, *args, **kwds):
        # Helper passed to map() by map_method().
        return getattr(extension.obj, method_name)(*args, **kwds)

    def map_method(self, method_name, *args, **kwds):
        """Iterate over the extensions invoking a method by name.

        This is equivalent of using :meth:`map` with func set to
        `lambda x: x.obj.method_name()`
        while being more convenient.

        Exceptions raised from within the called method are propagated up
        and processing stopped if self.propagate_map_exceptions is True,
        otherwise they are logged and ignored.

        .. versionadded:: 0.12

        :param method_name: The extension method name
            to call for each extension.
        :param args: Variable arguments to pass to method
        :param kwds: Keyword arguments to pass to method
        :returns: List of values returned from methods
        """
        return self.map(self._call_extension_method,
                        method_name, *args, **kwds)

    def _invoke_one_plugin(self, response_callback, func, e, args, kwds):
        # Call func for a single extension, either propagating or logging
        # failures depending on propagate_map_exceptions.
        try:
            response_callback(func(e, *args, **kwds))
        except Exception as err:
            if self.propagate_map_exceptions:
                raise
            else:
                LOG.error('error calling %r: %s', e.name, err)
                LOG.exception(err)

    def items(self):
        """Return an iterator of tuples of the form (name, extension).

        This is analogous to the Mapping.items() method.
        """
        return self._extensions_by_name.items()

    def __iter__(self):
        """Produce iterator for the manager.

        Iterating over an ExtensionManager produces the :class:`Extension`
        instances in the order they would be invoked.
        """
        return iter(self.extensions)

    def __getitem__(self, name):
        """Return the named extension.

        Accessing an ExtensionManager as a dictionary (``em['name']``)
        produces the :class:`Extension` instance with the
        specified name.
        """
        return self._extensions_by_name[name]

    def __contains__(self, name):
        """Return true if name is in list of enabled extensions."""
        return any(extension.name == name for extension in self.extensions)
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import trace as trace_service
from st2common.transport import consumers, liveaction, publishers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import isotime
from st2common.util import jinja as jinja_utils
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX
from st2common.constants.action import ACTION_RESULTS_KV_PREFIX
from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, SYSTEM_SCOPE, DATASTORE_PARENT_SCOPE
from st2common.services.keyvalues import KeyValueLookup
__all__ = [
'Notifier',
'get_notifier'
]
LOG = logging.getLogger(__name__)
ACTIONUPDATE_WORK_Q = liveaction.get_queue('st2.notifiers.work',
routing_key=publishers.UPDATE_RK)
ACTION_SENSOR_ENABLED = cfg.CONF.action_sensor.enable
# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][1]
class Notifier(consumers.MessageHandler):
    """Consumes liveaction update messages and, for completed liveactions,
    applies post-run policies and dispatches notify/action triggers.
    """
    # Messages on the work queue are deserialized into this model type.
    message_type = LiveActionDB

    def __init__(self, connection, queues, trigger_dispatcher=None):
        super(Notifier, self).__init__(connection, queues)
        if not trigger_dispatcher:
            trigger_dispatcher = TriggerDispatcher(LOG)
        self._trigger_dispatcher = trigger_dispatcher
        # String trigger references ("pack.name") used when dispatching.
        self._notify_trigger = ResourceReference.to_string_reference(
            pack=NOTIFY_TRIGGER_TYPE['pack'],
            name=NOTIFY_TRIGGER_TYPE['name'])
        self._action_trigger = ResourceReference.to_string_reference(
            pack=ACTION_TRIGGER_TYPE['pack'],
            name=ACTION_TRIGGER_TYPE['name'])

    def process(self, liveaction):
        # Entry point invoked for each liveaction update message.
        live_action_id = str(liveaction.id)
        extra = {'live_action_db': liveaction}
        LOG.debug('Processing liveaction %s', live_action_id, extra=extra)
        # Only completed liveactions produce notifications/triggers.
        if liveaction.status not in LIVEACTION_COMPLETED_STATES:
            LOG.debug('Skipping processing of liveaction %s since it\'s not in a completed state' %
                      (live_action_id), extra=extra)
            return
        execution = self._get_execution_for_liveaction(liveaction)
        if not execution:
            LOG.exception('Execution object corresponding to LiveAction %s not found.',
                          live_action_id, extra=extra)
            return None
        self._apply_post_run_policies(liveaction_db=liveaction)
        if liveaction.notify is not None:
            self._post_notify_triggers(liveaction=liveaction, execution=execution)
        self._post_generic_trigger(liveaction=liveaction, execution=execution)

    def _get_execution_for_liveaction(self, liveaction):
        # Look up the ActionExecution for this liveaction; None if missing.
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))
        if not execution:
            return None
        return execution

    def _post_notify_triggers(self, liveaction=None, execution=None):
        notify = getattr(liveaction, 'notify', None)
        if not notify:
            return
        # on_complete fires for any completed state; on_success/on_failure
        # fire additionally, depending on the final status.
        if notify.on_complete:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_complete,
                default_message_suffix='completed.')
        if liveaction.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_success,
                default_message_suffix='succeeded.')
        if liveaction.status in LIVEACTION_FAILED_STATES and notify.on_failure:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_failure,
                default_message_suffix='failed.')

    def _post_notify_subsection_triggers(self, liveaction=None, execution=None,
                                         notify_subsection=None,
                                         default_message_suffix=None):
        # 'channels' is the deprecated predecessor of 'routes'.
        routes = (getattr(notify_subsection, 'routes') or
                  getattr(notify_subsection, 'channels', None))
        execution_id = str(execution.id)
        if routes and len(routes) >= 1:
            payload = {}
            message = notify_subsection.message or (
                'Action ' + liveaction.action + ' ' + default_message_suffix)
            data = notify_subsection.data or {}
            jinja_context = self._build_jinja_context(liveaction=liveaction, execution=execution)
            # Render message/data templates best-effort: on render failure
            # the un-rendered value is kept and the error logged.
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            try:
                message = self._transform_message(message=message,
                                                  context=jinja_context)
            except:
                LOG.exception('Failed (Jinja) transforming `message`.')
            try:
                data = self._transform_data(data=data, context=jinja_context)
            except:
                LOG.exception('Failed (Jinja) transforming `data`.')
            # At this point convert result to a string. This restricts the rulesengines
            # ability to introspect the result. On the other handle atleast a json usable
            # result is sent as part of the notification. If jinja is required to convert
            # to a string representation it uses str(...) which make it impossible to
            # parse the result as json any longer.
            # TODO: Use to_serializable_dict
            data['result'] = json.dumps(liveaction.result)
            payload['message'] = message
            payload['data'] = data
            payload['execution_id'] = execution_id
            payload['status'] = liveaction.status
            payload['start_timestamp'] = isotime.format(liveaction.start_timestamp)
            try:
                payload['end_timestamp'] = isotime.format(liveaction.end_timestamp)
            except AttributeError:
                # This can be raised if liveaction.end_timestamp is None, which is caused
                # when policy cancels a request due to concurrency
                # In this case, use datetime.now() instead
                payload['end_timestamp'] = isotime.format(datetime.utcnow())
            payload['action_ref'] = liveaction.action
            payload['runner_ref'] = self._get_runner_ref(liveaction.action)
            trace_context = self._get_trace_context(execution_id=execution_id)
            # Dispatch one trigger per route; collect failures and only
            # raise after every route has been attempted.
            failed_routes = []
            for route in routes:
                try:
                    payload['route'] = route
                    # Deprecated. Only for backward compatibility reasons.
                    payload['channel'] = route
                    LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'],
                              liveaction.id, payload)
                    self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload,
                                                      trace_context=trace_context)
                except:
                    failed_routes.append(route)
            if len(failed_routes) > 0:
                raise Exception('Failed notifications to routes: %s' % ', '.join(failed_routes))

    def _build_jinja_context(self, liveaction, execution):
        # Context dict exposed to Jinja templates in message/data rendering.
        context = {}
        context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
        context.update({
            DATASTORE_PARENT_SCOPE: {
                SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
            }
        })
        context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction.parameters})
        context.update({ACTION_CONTEXT_KV_PREFIX: liveaction.context})
        context.update({ACTION_RESULTS_KV_PREFIX: execution.result})
        return context

    def _transform_message(self, message, context=None):
        # Render the message through Jinja; fall back to the raw message if
        # the rendered mapping lacks a 'message' key.
        mapping = {'message': message}
        context = context or {}
        return (jinja_utils.render_values(mapping=mapping, context=context)).get('message',
                                                                                 message)

    def _transform_data(self, data, context=None):
        return jinja_utils.render_values(mapping=data, context=context)

    def _get_trace_context(self, execution_id):
        trace_db = trace_service.get_trace_db_by_action_execution(
            action_execution_id=execution_id)
        if trace_db:
            return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If necessary
        # it shall be created downstream. Sure this is impl leakage of some sort.
        return None

    def _post_generic_trigger(self, liveaction=None, execution=None):
        # Dispatch the generic "action completed" trigger, when enabled.
        if not ACTION_SENSOR_ENABLED:
            LOG.debug('Action trigger is disabled, skipping trigger dispatch...')
            return
        execution_id = str(execution.id)
        payload = {'execution_id': execution_id,
                   'status': liveaction.status,
                   'start_timestamp': str(liveaction.start_timestamp),
                   # deprecate 'action_name' at some point and switch to 'action_ref'
                   'action_name': liveaction.action,
                   'action_ref': liveaction.action,
                   'runner_ref': self._get_runner_ref(liveaction.action),
                   'parameters': liveaction.get_masked_parameters(),
                   'result': liveaction.result}
        # Use execution_id to extract trace rather than liveaction. execution_id
        # will look-up an exact TraceDB while liveaction depending on context
        # may not end up going to the DB.
        trace_context = self._get_trace_context(execution_id=execution_id)
        LOG.debug('POSTing %s for %s. Payload - %s. TraceContext - %s',
                  ACTION_TRIGGER_TYPE['name'], liveaction.id, payload, trace_context)
        self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload,
                                          trace_context=trace_context)

    def _apply_post_run_policies(self, liveaction_db):
        # Apply policies defined for the action.
        policy_dbs = Policy.query(resource_ref=liveaction_db.action, enabled=True)
        LOG.debug('Applying %s post_run policies' % (len(policy_dbs)))
        for policy_db in policy_dbs:
            driver = policies.get_driver(policy_db.ref,
                                         policy_db.policy_type,
                                         **policy_db.parameters)
            try:
                LOG.debug('Applying post_run policy "%s" (%s) for liveaction %s' %
                          (policy_db.ref, policy_db.policy_type, str(liveaction_db.id)))
                liveaction_db = driver.apply_after(liveaction_db)
            except:
                # Policy failures are logged and do not stop the others.
                # NOTE(review): bare except also swallows KeyboardInterrupt.
                LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)
        return liveaction_db

    def _get_runner_ref(self, action_ref):
        """
        Retrieve a runner reference for the provided action.

        :rtype: ``str``
        """
        action = Action.get_by_ref(action_ref)
        return action['runner_type']['name']
def get_notifier():
    """Builds a Notifier bound to the action-update work queue."""
    connection = Connection(transport_utils.get_messaging_urls())
    with connection as conn:
        dispatcher = TriggerDispatcher(LOG)
        return Notifier(conn, [ACTIONUPDATE_WORK_Q], trigger_dispatcher=dispatcher)
| |
#!/usr/bin/python
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.urls import ConnectionError, NoSSLError, open_url
from ansible.module_utils.basic import AnsibleModule, get_distribution
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
import ansible.module_utils.six.moves.urllib.error as urllib_error
from distutils.version import LooseVersion
import zipfile
import traceback
import time
import tempfile
import socket
import shutil
import re
import pwd
import os
import json
import hashlib
import grp
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: intellij_install_plugin
short_description: Installs the specified plugin for the specified user.
description:
- Installs the specified plugin for the specified user.
options:
plugin_manager_url:
description:
- URL of the JetBrains plugin manager API.
required: true
intellij_home:
description:
- The root directory of the IntelliJ installation.
required: true
intellij_user_plugins_dir:
description:
- This is the dir where the user's IntelliJ plugins are located.
required: true
owner:
description:
- The user to install the plugin for.
required: true
group:
description:
- The group for the files and directories created.
required: true
plugin_id:
description:
- The ID of the plugin to install.
required: true
download_cache:
description:
- The directory to cache downloads in.
required: true
author:
- John Freeman (GantSign Ltd.)
'''
EXAMPLES = '''
- name: install plugin
become: yes
intellij_install_plugin:
plugin_manager_url: 'https://plugins.jetbrains.com/pluginManager/'
intellij_home: '/opt/idea/idea-ultimate-2018.1.1'
intellij_user_plugins_dir: '.IntelliJIdea2018.1/config/plugins'
owner: bob
group: bob
plugin_id: google-java-format
download_cache: '/tmp/downloads'
'''
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
try:
from ansible.module_utils.six.moves.urllib.parse import urlencode, urljoin
HAS_URLPARSE = True
except BaseException:
HAS_URLPARSE = False
def make_dirs(module, path, mode, uid, gid):
    """Create `path` and any missing parent directories.

    Each directory that has to be created is given `mode` and chowned to
    `uid`:`gid`; directories that already exist are left untouched.

    :param module: AnsibleModule instance (unused, kept for call symmetry).
    :param path: path of the directory to create.
    :param mode: permission bits (e.g. 0o775) for newly created dirs.
    :param uid: numeric owner for newly created dirs.
    :param gid: numeric group for newly created dirs.
    """
    dirs = [path]
    dirname = os.path.dirname(path)
    # Collect ancestors so they are created top-down.  Also stop at '' so
    # a relative path cannot send this loop spinning forever (dirname of a
    # relative single component is '', not '/').
    while dirname not in ('/', ''):
        dirs.insert(0, dirname)
        dirname = os.path.dirname(dirname)

    for dirname in dirs:
        if not os.path.exists(dirname):
            os.mkdir(dirname, mode)
            os.chown(dirname, uid, gid)
def get_root_dirname_from_zip(module, zipfile_path):
    """Return the name of the top-level directory inside the zip archive."""
    if not os.path.isfile(zipfile_path):
        module.fail_json(msg='File not found: %s' % zipfile_path)

    with zipfile.ZipFile(zipfile_path, 'r') as archive:
        entries = archive.namelist()

    if not entries:
        module.fail_json(msg='Plugin is empty: %s' % zipfile_path)

    # The first entry's leading path component is the plugin's root dir.
    return entries[0].split('/')[0]
def extract_zip(module, output_dir, zipfile_path, uid, gid):
    """Extract the archive into `output_dir` and chown everything extracted."""
    if not os.path.isfile(zipfile_path):
        module.fail_json(msg='File not found: %s' % zipfile_path)

    with zipfile.ZipFile(zipfile_path, 'r') as archive:
        archive.extractall(output_dir)
        entries = archive.namelist()

    for entry in entries:
        current = os.path.join(output_dir, entry)
        # chown the entry and every intermediate directory, walking up
        # until (but not including) output_dir itself.
        while not os.path.samefile(current, output_dir):
            os.chown(current, uid, gid)
            current = os.path.normpath(os.path.join(current, os.pardir))
def fetch_url(module, url, method=None, timeout=10, follow_redirects=True):
    """Fetch `url` and return a ``(response, info)`` tuple.

    `response` is the open urllib response object (None on failure);
    `info` is a dict with at least ``url``, ``status`` (-1 on connection
    failure) and ``msg``, plus lower-cased response headers on success.

    NOTE(review): relies on open_url, NoSSLError, get_distribution and
    ConnectionError being imported elsewhere in this module -- confirm.
    """
    if not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')
    # ensure we use proper tempdir
    old_tempdir = tempfile.tempdir
    tempfile.tempdir = module.tmpdir
    r = None
    info = dict(url=url, status=-1)
    try:
        r = open_url(url,
                     method=method,
                     timeout=timeout,
                     follow_redirects=follow_redirects)
        # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are
        # predictable
        info.update(dict((k.lower(), v) for k, v in r.info().items()))
        # Don't be lossy, append header values for duplicate headers
        # In Py2 there is nothing that needs done, py2 does this for us
        if PY3:
            temp_headers = {}
            for name, value in r.headers.items():
                # The same as above, lower case keys to match py2 behavior, and
                # create more consistent results
                name = name.lower()
                if name in temp_headers:
                    temp_headers[name] = ', '.join((temp_headers[name], value))
                else:
                    temp_headers[name] = value
            info.update(temp_headers)
        # finally update the result with a message about the fetch
        info.update(
            dict(msg='OK (%s bytes)' %
                 r.headers.get('Content-Length', 'unknown'),
                 url=r.geturl(),
                 status=r.code))
    except NoSSLError as e:
        # SSL support missing: on RHEL we can point at EPEL's python-ssl.
        distribution = get_distribution()
        if distribution is not None and distribution.lower() == 'redhat':
            module.fail_json(
                msg='%s. You can also install python-ssl from EPEL' %
                to_native(e), **info)
        else:
            module.fail_json(msg='%s' % to_native(e), **info)
    except (ConnectionError, ValueError) as e:
        module.fail_json(msg=to_native(e), **info)
    except urllib_error.HTTPError as e:
        try:
            body = e.read()
        except AttributeError:
            body = ''
        # Try to add exception info to the output but don't fail if we can't
        try:
            # Lowercase keys, to conform to py2 behavior, so that py3 and py2
            # are predictable
            info.update(dict((k.lower(), v) for k, v in e.info().items()))
        except Exception:
            pass
        info.update({'msg': to_native(e), 'body': body, 'status': e.code})
    except urllib_error.URLError as e:
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg='Request failed: %s' % to_native(e), status=code))
    except socket.error as e:
        info.update(
            dict(
                msg='Connection failure: %s' %
                to_native(e),
                status=-
                1))
    except httplib.BadStatusLine as e:
        info.update(
            dict(
                msg=('Connection failure: connection was closed before a valid'
                     ' response was received: %s') %
                to_native(
                    e.line),
                status=-
                1))
    except Exception as e:
        info.update(dict(msg='An unknown error occurred: %s' % to_native(e),
                         status=-1),
                    exception=traceback.format_exc())
    finally:
        # Always restore the module-global tempdir we overrode above.
        tempfile.tempdir = old_tempdir
    return r, info
def get_build_number_from_xml(module, intellij_home, xml):
    """Read the IntelliJ build number from an ApplicationInfo XML document."""
    doc = etree.parse(xml)
    # Try the plain schema first, then the namespaced variant.
    build_elem = doc.find('./build/[@number]')
    if build_elem is None:
        build_elem = doc.find(
            './{http://jetbrains.org/intellij/schema/application-info}build/'
            '[@number]'
        )
    if build_elem is None:
        module.fail_json(
            msg=('Unable to determine IntelliJ version from path: %s '
                 '(unsupported schema - missing build element)') %
            intellij_home)

    number = build_elem.get('number')
    if number is None:
        module.fail_json(
            msg=('Unable to determine IntelliJ version from path: %s '
                 '(unsupported schema - missing build number value)') %
            intellij_home)
    return number
def get_build_number_from_jar(module, intellij_home):
    """Return the build number stored in lib/resources.jar, or None.

    Returns None when the jar is absent (newer IntelliJ releases ship
    product-info.json instead); fails the module when the jar exists but
    contains neither of the known application-info files.
    """
    resources_jar = os.path.join(intellij_home, 'lib', 'resources.jar')
    if not os.path.isfile(resources_jar):
        return None

    with zipfile.ZipFile(resources_jar, 'r') as resource_zip:
        # ZipFile.open raises KeyError when the member is missing.
        for info_path in ('idea/IdeaApplicationInfo.xml',
                          'idea/ApplicationInfo.xml'):
            try:
                with resource_zip.open(info_path) as xml:
                    return get_build_number_from_xml(module, intellij_home,
                                                     xml)
            except KeyError:
                continue
    module.fail_json(
        msg=('Unable to determine IntelliJ version from path: %s '
             '(XML info file not found in "lib/resources.jar")') %
        intellij_home)
def get_build_number_from_json(module, intellij_home):
    """Return the build number from product-info.json under intellij_home."""
    info_path = os.path.join(intellij_home, 'product-info.json')
    if not os.path.isfile(info_path):
        module.fail_json(
            msg=('Unable to determine IntelliJ version from path: %s '
                 '("product-info.json" not found)') %
            intellij_home)

    with open(info_path) as info_file:
        return json.load(info_file)['buildNumber']
def get_build_number(module, intellij_home):
    """Determine the IntelliJ build number, trying the jar then the JSON."""
    build = get_build_number_from_jar(module, intellij_home)
    if build:
        return build
    return get_build_number_from_json(module, intellij_home)
def get_plugin_info(module, plugin_manager_url, intellij_home, plugin_id):
    """Resolve the download URL and local cache file name for a plugin.

    Queries the JetBrains plugin manager with a HEAD request (redirects
    disabled) and derives a stable cache file name from the redirect
    location.

    :returns: tuple ``(plugin_url, file_name)``.
    """
    build_number = get_build_number(module, intellij_home)
    params = {'action': 'download', 'build': build_number, 'id': plugin_id}

    query_params = urlencode(params)
    url = '%s?%s' % (plugin_manager_url, query_params)

    # Retry up to 3 times, 5 seconds apart, on transient failures.
    for _ in range(0, 3):
        resp, info = fetch_url(module,
                               url,
                               method='HEAD',
                               timeout=3,
                               follow_redirects=False)
        if resp is not None:
            resp.close()
        status_code = info['status']
        if status_code == 404:
            module.fail_json(msg='Unable to find plugin "%s" for build "%s"' %
                             (plugin_id, build_number))
        if status_code > -1 and status_code < 400:
            break

        time.sleep(5)

    if status_code == -1 or status_code >= 400:
        module.fail_json(msg='Error querying url "%s": %s' %
                         (url, info['msg']))

    location = info.get('location')
    if location is None:
        location = info.get('Location')
    if location is None:
        module.fail_json(msg='Unsupported HTTP response for: %s (status=%s)' %
                         (url, status_code))

    if location.startswith('http'):
        plugin_url = location
    else:
        plugin_url = urljoin(plugin_manager_url, location)

    # Derive a cache file name.  The query string is optional so that URLs
    # without one still produce a sensible name instead of falling through
    # to the hash fallback.
    jar_pattern = re.compile(r'/(?P<file_name>[^/?]+\.jar)(?:\?.*)?$')
    jar_matcher = jar_pattern.search(plugin_url)
    if jar_matcher:
        file_name = jar_matcher.group('file_name')
    else:
        versioned_pattern = re.compile(
            r'(?P<plugin_id>[0-9]+)/(?P<update_id>[0-9]+)/'
            r'(?P<file_name>[^/?]+)(?:\?.*)?$'
        )
        versioned_matcher = versioned_pattern.search(plugin_url)
        if versioned_matcher:
            file_name = '%s-%s-%s' % (versioned_matcher.group('plugin_id'),
                                      versioned_matcher.group('update_id'),
                                      versioned_matcher.group('file_name'))
        else:
            # hashlib requires bytes on Python 3; encode the URL first
            # (passing the str raised TypeError here).
            hash_object = hashlib.sha256(plugin_url.encode('utf-8'))
            file_name = '%s-%s.zip' % (plugin_id, hash_object.hexdigest())

    return plugin_url, file_name
def download_plugin(module, plugin_url, file_name, download_cache):
    """Download the plugin into the cache dir and return the local path.

    The download is skipped when `file_name` is already cached.  Up to
    three attempts are made; on success the content is written to a temp
    file and atomically moved into place so a partial download never
    lands in the cache.
    """
    if not os.path.isdir(download_cache):
        os.makedirs(download_cache, 0o775)
    download_path = os.path.join(download_cache, file_name)
    if os.path.isfile(download_path):
        # Cache hit - nothing to download.
        return download_path
    for _ in range(0, 3):
        resp, info = fetch_url(module,
                               plugin_url,
                               method='GET',
                               timeout=20,
                               follow_redirects=True)
        status_code = info['status']
        if status_code >= 200 and status_code < 300:
            tmp_dest = getattr(module, 'tmpdir', None)
            fd, b_tempname = tempfile.mkstemp(dir=tmp_dest)
            f = os.fdopen(fd, 'wb')
            try:
                shutil.copyfileobj(resp, f)
            except Exception as e:
                # Best-effort cleanup; fail_json() exits the module.
                os.remove(b_tempname)
                resp.close()
                module.fail_json(
                    msg='Failed to create temporary content file: %s' %
                    to_native(e))
            f.close()
            resp.close()
            module.atomic_move(to_native(b_tempname), download_path)
            return download_path
        if resp is not None:
            resp.close()
    # All attempts failed; report the last error.
    module.fail_json(msg='Error downloading url "%s": %s' %
                     (plugin_url, info['msg']))
def install_plugin(module, plugin_manager_url, intellij_home, plugins_dir, uid,
                   gid, plugin_id, download_cache):
    """Install the plugin; return True when a change was (or would be) made."""
    plugin_url, file_name = get_plugin_info(module, plugin_manager_url,
                                            intellij_home, plugin_id)
    plugin_path = download_plugin(module, plugin_url, file_name,
                                  download_cache)

    if not module.check_mode:
        make_dirs(module, plugins_dir, 0o775, uid, gid)

    if not plugin_path.endswith('.jar'):
        # Zipped plugin: installed as a directory named after the zip root.
        root_dirname = get_root_dirname_from_zip(module, plugin_path)
        plugin_dir = os.path.join(plugins_dir, root_dirname)
        if os.path.exists(plugin_dir):
            return False
        if not module.check_mode:
            extract_zip(module, plugins_dir, plugin_path, uid, gid)
        return True

    # Single-jar plugin: copied straight into the plugins directory.
    dest_path = os.path.join(plugins_dir, os.path.basename(plugin_path))
    if os.path.exists(dest_path):
        return False
    if not module.check_mode:
        shutil.copy(plugin_path, dest_path)
        os.chown(dest_path, uid, gid)
        os.chmod(dest_path, 0o664)
    return True
def run_module():
    """Entry point for the intellij_install_plugin Ansible module."""
    module_args = dict(plugin_manager_url=dict(type='str', required=True),
                       intellij_home=dict(type='path', required=True),
                       intellij_user_plugins_dir=dict(type='path',
                                                      required=True),
                       owner=dict(type='str', required=True),
                       group=dict(type='str', required=True),
                       plugin_id=dict(type='str', required=True),
                       download_cache=dict(type='path', required=True))

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    plugin_manager_url = module.params['plugin_manager_url']
    intellij_home = os.path.expanduser(module.params['intellij_home'])
    owner = module.params['owner']
    group = module.params['group']

    # Accept either a numeric uid or a user name.
    try:
        uid = int(owner)
    except ValueError:
        uid = pwd.getpwnam(owner).pw_uid
    username = pwd.getpwuid(uid).pw_name

    # Accept either a numeric gid or a group name.
    try:
        gid = int(group)
    except ValueError:
        gid = grp.getgrnam(group).gr_gid

    # Resolve the plugins dir relative to the owner's home directory.
    intellij_user_plugins_dir = os.path.expanduser(
        os.path.join('~' + username,
                     module.params['intellij_user_plugins_dir']))

    plugin_id = module.params['plugin_id']
    download_cache = os.path.expanduser(module.params['download_cache'])

    # Check if we have lxml 2.3.0 or newer installed
    if not HAS_LXML:
        module.fail_json(
            msg='The xml ansible module requires the lxml python library '
            'installed on the managed machine')
    elif LooseVersion('.'.join(
            to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
        module.fail_json(
            msg='The xml ansible module requires lxml 2.3.0 or newer '
            'installed on the managed machine')
    elif LooseVersion('.'.join(
            to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
        module.warn(
            'Using lxml version lower than 3.0.0 does not guarantee '
            'predictable element attribute order.'
        )

    changed = install_plugin(module, plugin_manager_url, intellij_home,
                             intellij_user_plugins_dir, uid, gid, plugin_id,
                             download_cache)

    # Report the plugin id in the result message (the original code
    # interpolated the owning user's name here by mistake).
    if changed:
        msg = 'Plugin %s has been installed' % plugin_id
    else:
        msg = 'Plugin %s was already installed' % plugin_id

    module.exit_json(changed=changed, msg=msg)
def main():
    """Module entry point when executed by Ansible."""
    run_module()
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import collections
import hashlib
import json
import os
import string
import subprocess
import sys
import threading
import time
try:
import Queue as queue
except ImportError:
import queue
from powerline.lib import threaded
import powerline
# Hotlist priority levels as used by weechat (ascending urgency).
HOTLIST_LOW = 0
HOTLIST_MESSAGE = 1
HOTLIST_PRIVATE = 2
HOTLIST_HIGHLIGHT = 3
PRIORITIES = (HOTLIST_LOW, HOTLIST_MESSAGE, HOTLIST_PRIVATE, HOTLIST_HIGHLIGHT)
# Short labels used when rendering the summary format.
HOTLIST_SUMMARY = {HOTLIST_LOW: 'low', HOTLIST_MESSAGE: 'msg',
                   HOTLIST_PRIVATE: 'prv', HOTLIST_HIGHLIGHT: 'hl'}
# Supported output formats for the segment.
FMT_COUNT = 'count'
FMT_SUMMARY = 'summary'
FMT_BUFFERS = 'buffers'
# Remote command: dump the hotlist file once, then re-dump on every write
# (inotifywait keeps streaming change events).
INOTIFY_CMD = ('bash -c "cat {hotlist_file};echo;'
               'inotifywait -e close_write -m -q {hotlist_file} | while read;'
               'do cat {hotlist_file};echo;done"')
REMOTES = {}
class OutputThread(threading.Thread):
    """Daemon thread pumping lines from a binary file object into a queue."""

    daemon = True

    def __init__(self, out, q, shutdown):
        threading.Thread.__init__(self)
        self.out = out
        self.q = q
        self.shutdown = shutdown

    def run(self):
        while True:
            line = self.out.readline()
            if line == b'':
                # EOF: the producing process closed its end.
                break
            if self.shutdown.is_set():
                break
            self.q.put(line)
            if self.shutdown.is_set():
                break
        self.out.close()
def keys_to_int(data):
    """Return a copy of `data` with digit string keys cast back to int.

    Weechat's hotlist uses priority levels 0-3 as keys, which JSON forces
    to strings; this restores them to ints.  Nested lists of dicts are
    converted recursively.
    """
    result = {}
    # dict.items() works on both Python 2 and 3 (iteritems() is py2-only
    # and raised AttributeError under py3).
    for k, v in data.items():
        # NOTE(jkoelker) this is ok since we only have 0-3 ;)
        if k in string.digits:
            k = int(k)

        if isinstance(v, list):
            v = [keys_to_int(i) for i in v]

        result[k] = v
    return result
def unwind_queue(q, logger=lambda x: None):
    """Drain `q`, handing every item to `logger` (a no-op by default)."""
    while not q.empty():
        logger(q.get(True))
def start_transport(shutdown, cmd, out_q, err_q, **kwargs):
    """Spawn the transport subprocess and reader threads for its output.

    stdout/stderr lines are pumped into `out_q`/`err_q` by daemon
    OutputThread instances until `shutdown` is set.

    :returns: the subprocess.Popen object.
    """
    null = open(os.devnull)
    try:
        p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, stdin=null, bufsize=1,
                             close_fds=('posix' in sys.builtin_module_names))
    finally:
        # The child holds its own duplicate of the descriptor; close the
        # parent's copy so it is not leaked on every (re)start.
        null.close()

    out = OutputThread(p.stdout, out_q, shutdown)
    err = OutputThread(p.stderr, err_q, shutdown)

    out.start()
    err.start()

    return p
def setup_transport(host, command, transport, transport_args):
    """Build the (not yet started) transport descriptor for a remote host."""
    # e.g. ['ssh', '-p', '2222', 'myhost', 'cat ...']
    cmd = [transport] + transport_args.split() + [host, command]
    return {'out_q': queue.Queue(),
            'err_q': queue.Queue(),
            'shutdown': threading.Event(),
            'transport': None,
            'cmd': cmd,
            'host': host}
def shutdown_transport(transport, shutdown, out_q, err_q, logger, **kwargs):
    """Terminate the transport process (escalating to kill) and drain queues."""
    if transport is not None:
        logger('Terminating transport')
        transport.terminate()

        # Give the process up to ~5 seconds to exit gracefully.
        attempts = 0
        while transport.poll() is None and attempts < 50:
            time.sleep(0.1)
            attempts += 1

        if transport.poll() is None:
            logger('Transport did not die, killing')
            transport.kill()

        unwind_queue(out_q)
        unwind_queue(err_q)
    shutdown.set()
def remote_key(host, command, transport, transport_args):
    """Return a stable hash identifying a (host, command, transport) combo."""
    value = ''.join((host, command, transport, transport_args))
    # hashlib.md5 requires bytes on Python 3; passing the joined str
    # raised TypeError there.
    return hashlib.md5(value.encode('utf-8')).hexdigest()
def get_lines(q, logger):
    """Drain `q`, decoding each raw line, logging it, and returning the list."""
    lines = []
    while not q.empty():
        try:
            raw = q.get(block=True, timeout=0.1)
        except queue.Empty:
            break
        # Strip the trailing newline and decode the raw bytes.
        text = raw[:-1].decode('utf-8')
        logger(text)
        lines.append(text)
    return lines
def process_remote(remote, logger):
    """Service one remote: (re)start its transport and fan out new lines.

    Called periodically by RemoteDispatcher.run(); any stdout lines read
    from the transport are pushed onto every registered data queue.
    """
    log_prefix = logger.prefix + ':' + remote['host']
    if remote['transport'] is None:
        # First call for this remote: spawn the transport process.
        logger.debug('Starting transport', prefix=log_prefix)
        remote['transport'] = start_transport(**remote)
    if remote['transport'].poll() is not None:
        # Transport exited: flush its remaining output, then restart it
        # with a fresh shutdown event (the old one stops the old readers).
        logger.debug('Transport dead, restarting')
        log = lambda x: logger.error('stderr: {}', x,
                                     prefix=log_prefix)
        unwind_queue(remote['err_q'], logger=log)
        log = lambda x: logger.debug('stdout: {}', x,
                                     prefix=log_prefix)
        unwind_queue(remote['out_q'], logger=log)
        remote['shutdown'].set()
        remote['shutdown'] = threading.Event()
        remote['transport'] = start_transport(**remote)
    # NOTE(jkoelker) Keep the stderr queue clean
    log = lambda x: logger.debug('stderr: {}', x,
                                 prefix=log_prefix)
    unwind_queue(remote['err_q'], logger=log)
    log = lambda x: logger.debug('stdout: {}', x,
                                 prefix=log_prefix)
    lines = get_lines(remote['out_q'], log)
    for line in lines:
        for data_queue in remote['data_queues']:
            data_queue.put(line)
class RemoteDispatcher(threading.Thread):
    """Daemon thread that owns all remote transports and pumps their output.

    One dispatcher serves every configured remote: it (re)starts
    transports as needed and fans incoming lines out to the per-segment
    data queues.
    """

    daemon = True

    def __init__(self, shutdown, logger):
        threading.Thread.__init__(self)
        self.remotes = {}
        self.shutdown = shutdown
        self.logger = logger

    def _shutdown(self):
        """Terminate every transport; called once the run loop exits."""
        self.logger.debug('Shutting down transports')
        # .values() instead of py2-only .itervalues().
        for remote in self.remotes.values():
            log_prefix = self.logger.prefix + ':' + remote['host']
            # Bind the prefixed log callable to the dispatcher's logger.
            # (The original lambda referenced itself -- `logger =
            # lambda x: logger.debug(...)` -- recursing infinitely.)
            log = lambda x, _p=log_prefix: self.logger.debug(x, prefix=_p)
            shutdown_transport(logger=log, **remote)

    def add_remote(self, host, command, transport='ssh', transport_args=''):
        """Register a remote (idempotently) and return a fresh data queue."""
        key = remote_key(host, command, transport, transport_args)

        if key not in self.remotes:
            remote = setup_transport(host, command, transport=transport,
                                     transport_args=transport_args)
            remote['data_queues'] = []
            self.remotes[key] = remote

        data_queue = queue.Queue()
        self.remotes[key]['data_queues'].append(data_queue)
        return data_queue

    def run(self):
        while not self.shutdown.is_set():
            for remote in self.remotes.values():
                process_remote(remote, self.logger)
            time.sleep(0.5)
        self._shutdown()
# Immutable, hashable cache key for one segment configuration; it also
# carries everything needed to (re)establish the remote transport.
Key = collections.namedtuple('Key', ('host', 'format', 'min_priority',
                                     'buffers', 'buffers_exclude',
                                     'hotlist_file', 'command', 'transport',
                                     'transport_args'))
class Hotlist(threaded.KwThreadedSegment):
    """Powerline segment showing the weechat hotlist of a remote host.

    The hotlist JSON is streamed from the remote over a persistent
    transport (ssh by default, see INOTIFY_CMD) and rendered either as a
    total count (FMT_COUNT) or a per-priority summary (FMT_SUMMARY).
    """

    drop_interval = 0

    def __init__(self, *args, **kwargs):
        threaded.KwThreadedSegment.__init__(self, *args, **kwargs)
        # Per-Key queue of raw lines coming from the dispatcher.
        self.data_queues = {}
        # Last successfully computed state per Key; served while no new
        # data has arrived.
        self.state_cache = {}

    def shutdown(self, *args, **kwargs):
        """Stop the dispatcher thread, then shut the segment down."""
        self._dispatcher_shutdown.set()
        self.dispatcher.join(0.02)
        threaded.KwThreadedSegment.shutdown(self, *args, **kwargs)

    def start(self, *args, **kwargs):
        """Spin up the shared RemoteDispatcher before starting the segment."""
        self._dispatcher_shutdown = threading.Event()
        self.dispatcher = RemoteDispatcher(self._dispatcher_shutdown,
                                           self.logger)
        self.dispatcher.start()
        threaded.KwThreadedSegment.start(self, *args, **kwargs)

    def startup(self, pl, *args, **kwargs):
        """Create the prefixed logger and delegate to the base class."""
        self.logger = powerline.PowerlineLogger(pl.use_daemon_threads,
                                                pl.logger,
                                                pl.ext)
        self.logger.prefix = self.__class__.__name__.lower()
        threaded.KwThreadedSegment.startup(self, self.logger, *args, **kwargs)

    @staticmethod
    def key(host, format=FMT_COUNT, min_priority=2, buffers=None,
            buffers_exclude=None, hotlist_file='$HOME/.weechat/hotlist.json',
            command=INOTIFY_CMD, transport='ssh', transport_args='', **kwargs):
        """Build the hashable Key describing one segment configuration."""
        if buffers is None:
            buffers = []
        if buffers_exclude is None:
            buffers_exclude = []

        # Tuples so the Key stays hashable.
        buffers = tuple(buffers)
        buffers_exclude = tuple(buffers_exclude)
        min_priority = int(min_priority)
        command = command.format(hotlist_file=hotlist_file, host=host,
                                 transport=transport, **kwargs)

        return Key(host=host, format=format, min_priority=min_priority,
                   buffers=buffers, buffers_exclude=buffers_exclude,
                   hotlist_file=hotlist_file, command=command,
                   transport=transport, transport_args=transport_args)

    def _get_data(self, data_queue, host):
        """Return the newest decoded hotlist dict from the queue, or None."""
        if data_queue.empty():
            return

        try:
            data = data_queue.get_nowait()
        except queue.Empty:
            return

        # NOTE(jkoelker) gaurd against the queue becoming backed up
        queue_size = data_queue.qsize()
        if queue_size > 1:
            self.logger.debug('Dropping {} messages from {}', queue_size, host)
            # range() instead of py2-only xrange() (works on both).
            for _count in range(queue_size):
                try:
                    data = data_queue.get_nowait()
                except queue.Empty:
                    break

        if not data:
            return

        # NOTE(jkoelker) Simple 'sure, it looks like json' check
        if data[0] != '{' or data[-1] != '}':
            self.logger.debug('Data does not look like json: {}', data)
            return

        try:
            data = json.loads(data)
        except ValueError:
            # NOTE(review): assumes the base class provides
            # self.exception() -- confirm against powerline.lib.threaded.
            self.exception('Data is not JSON: {}', data)
            return

        data = keys_to_int(data)
        return data

    def compute_state(self, key):
        """Compute the render state for `key`, falling back to the cache."""
        if not key.host:
            self.logger.warn('Host not defined in config')
            return None

        if key not in self.data_queues:
            data_queue = self.dispatcher.add_remote(key.host,
                                                    key.command,
                                                    key.transport,
                                                    key.transport_args)
            self.data_queues[key] = data_queue

        data_queue = self.data_queues[key]
        data = self._get_data(data_queue, key.host)

        if not data:
            return self.state_cache.get(key)

        priorities = [p for p in PRIORITIES if p >= key.min_priority]
        state = {}

        if key.format == FMT_COUNT:
            state[FMT_COUNT] = self._count(data, priorities,
                                           key.buffers,
                                           key.buffers_exclude)
        elif key.format == FMT_SUMMARY:
            state[FMT_SUMMARY] = self._summary(data, priorities,
                                               key.buffers,
                                               key.buffers_exclude)

        if not state:
            return self.state_cache.get(key)

        self.state_cache[key] = state
        self.logger.debug(str(state))
        return state

    @staticmethod
    def _count(data, priorities, buffers, buffers_exclude):
        """Total activity over the wanted priorities, or None when zero."""
        count = 0
        # TODO(jkoelker) remove the double loop
        for priority in priorities:
            for i in data['hotlist']:
                if i['buffer_name'] in buffers_exclude:
                    continue
                elif i['short_name'] in buffers_exclude:
                    continue

                if buffers:
                    if i['buffer_name'] not in buffers:
                        continue
                    elif i['short_name'] not in buffers:
                        continue

                count = count + i[priority]
        if count:
            return count

    @staticmethod
    def _summary(data, priorities, buffers, buffers_exclude):
        """Per-priority activity counts as a dict, or None when all zero."""
        content = {}
        # TODO(jkoelker) remove the double loop
        for priority in priorities:
            count = 0
            for i in data['hotlist']:
                if i['buffer_name'] in buffers_exclude:
                    continue
                elif i['short_name'] in buffers_exclude:
                    continue

                if buffers:
                    if i['buffer_name'] not in buffers:
                        continue
                    elif i['short_name'] not in buffers:
                        continue

                count = count + i[priority]
            if count:
                content[priority] = count
        if content:
            return content

    @staticmethod
    def render_one(state, format=FMT_COUNT, **kwargs):
        """Render the computed state into powerline segment dicts."""
        if not state:
            return

        result = []
        default_groups = ['hotlist', 'email_alert']
        divider = 'hotlist:divider'

        if format == FMT_COUNT:
            if state.get(FMT_COUNT):
                result.append({'contents': str(state[FMT_COUNT]),
                               'divider_highlight_group': 'background:divider',
                               'highlight_group': default_groups})
        elif format == FMT_SUMMARY:
            fmt = kwargs.get('summary_format', {})
            space_divider = kwargs.get('space_divider', False)
            draw_inner_divider = not space_divider

            if state.get(FMT_SUMMARY):
                # items() instead of py2-only iteritems().
                for p, count in state[FMT_SUMMARY].items():
                    level = HOTLIST_SUMMARY[p]
                    content_fmt = fmt.get(level,
                                          '%s:{count}' % level[0].upper())
                    contents = content_fmt.format(count=count)
                    groups = ['hotlist_' + HOTLIST_SUMMARY[p]] + default_groups
                    if space_divider:
                        contents = contents + ' '
                    result.append({'contents': contents,
                                   'divider_highlight_group': divider,
                                   'highlight_group': groups,
                                   'draw_inner_divider': draw_inner_divider})
                # Trim the padding space from the final segment.
                if (result and space_divider and
                        result[-1]['contents'][-1] == ' '):
                    result[-1]['contents'] = result[-1]['contents'][:-1]
        return result
# Module-level segment instance referenced from powerline configs.
hotlist = Hotlist()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from cinder.openstack.common import context as request_context
from cinder.openstack.common.db.sqlalchemy import models
from cinder.openstack.common.gettextutils import _, _LI, _LW
from cinder.openstack.common import timeutils
LOG = logging.getLogger(__name__)
# Matches "scheme://user:password@..." so credentials can be masked.
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
    """Mask the credentials in a database URL for safe logging."""
    match = re.match(r"[^:]+://([^:]+):([^@]+)@.+", url)
    if not match:
        return url
    # Replace "user:password" with "****:****", keeping scheme and host.
    return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
class InvalidSortKey(Exception):
    """Raised by paginate_query() when a sort key is not a model attribute."""
    message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
    # Only one of sort_dir / sort_dirs may be supplied.
    assert(not (sort_dir and sort_dirs))
    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'
    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]
    assert(len(sort_dirs) == len(sort_keys))
    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))
    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)
        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            # Rows equal to the marker on keys [0, i) and strictly past
            # it on key i belong to the next page.
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))
            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))
            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)
        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)
    if limit is not None:
        query = query.limit(limit)
    return query
def _read_deleted_filter(query, db_model, read_deleted):
    """Apply the soft-delete visibility filter ('no' | 'yes' | 'only')."""
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        return query.filter(db_model.deleted == default_deleted_value)
    if read_deleted == 'yes':
        # No filter: include both deleted and active rows.
        return query
    if read_deleted == 'only':
        return query.filter(db_model.deleted != default_deleted_value)

    raise ValueError(_("Unrecognized read_deleted value '%s'")
                     % read_deleted)
def _project_filter(query, db_model, context, project_only):
    """Restrict the query to the context's project when requested.

    With project_only='allow_none', rows whose project_id is NULL are
    also included.
    """
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(or_(db_model.project_id == context.project_id,
                                     db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase
    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session
    :param args: Arguments to query. If None - model is used.
    :type args: tuple
    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only: bool or str
    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: str, one of 'no', 'yes' or 'only'

    Usage:

    ..code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
            context, Node,
            session=session,
            args=(func.count(Node.id), func.sum(Node.ram))
        ).filter_by(project_id=project_id)
    """
    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted
    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))
    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)
    return query
def get_table(engine, name):
    """Reflect and return an sqlalchemy Table from the live database.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.

    .. warning::

       Do not use this method when creating ForeignKeys in database
       migrations because sqlalchemy needs the same MetaData object to hold
       information about the parent table and the reference table in the
       ForeignKey. This method uses a unique MetaData object per table
       object so it won't work with ForeignKey creation.
    """
    meta = MetaData()
    meta.bind = engine
    return Table(name, meta, autoload=True)
class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
    def __init__(self, table, select):
        # `table`: target Table; `select`: selectable supplying the rows.
        # Rendered to SQL by visit_insert_from_select() below.
        self.table = table
        self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    # asfrom=True renders the bare table name rather than a full FROM clause.
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))
class ColumnError(Exception):
    """Error raised when no column or an invalid column is found.

    Raised by _get_not_supported_column() for both failure modes: the
    column name missing from the mapping, or the mapped value not being
    a sqlalchemy.Column instance.
    """
def _get_not_supported_column(col_name_col_instance, column_name):
    """Look up the replacement Column for a type sqlite can't reflect.

    :param col_name_col_instance: mapping of column name -> sqlalchemy.Column
                                  supplied by the caller for columns whose
                                  type sqlite reflects as NullType.
    :param column_name: name of the column to resolve.
    :raises ColumnError: if the column is missing or not a Column instance.
    """
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        # Fixed a stray ')' that used to end this message.
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    DEPRECATED: this function is deprecated and will be removed from cinder.db
    in a few releases. Please use UniqueConstraint.drop() method directly for
    sqlalchemy-migrate migration scripts.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name: name of table that contains uniq constraint.
    :param uc_name: name of uniq constraint that will be dropped.
    :param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pair column_name=column_instance.
                        column_instance is instance of Column. These params
                        are required only for columns that have unsupported
                        types by sqlite. For example BigInteger.
    """
    from migrate.changeset import UniqueConstraint
    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)
    if migrate_engine.name == "sqlite":
        # sqlite reflects unsupported column types as NullType; swap in
        # the caller-supplied Column objects before touching the table.
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)
    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drop (or mark ad `deleted` if use_soft_delete is True) old
    duplicate rows form table with name `table_name`. For each group of
    rows sharing the same uc_column values, the row with the largest id
    is kept.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # SELECT max(id), <uc columns> ... GROUP BY <uc columns> HAVING count > 1
    # yields one row per duplicated group, carrying the id to keep.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select, group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        # only touch rows that are not already soft-deleted
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            # soft delete: set deleted=id (keeps uniqueness among deleted
            # rows) and stamp deleted_at; updated_at is left unchanged.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
    """Return the 'not deleted' sentinel matching the id column's type.

    :raises ColumnError: if the id column is neither Integer nor String.
    """
    id_type = table.c.id.type
    if isinstance(id_type, Integer):
        return 0
    if isinstance(id_type, String):
        return ""
    raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    """Recreate the indexes that touch the `deleted` column.

    After the `deleted` column has been dropped and recreated, any index
    involving it must be rebuilt; a stale index with the same name is
    dropped first.

    :param migrate_engine: sqlalchemy engine
    :param table_name: table whose indexes are restored
    :param indexes: index descriptions captured before the column change
    """
    table = get_table(migrate_engine, table_name)
    insp = reflection.Inspector.from_engine(migrate_engine)
    existing_index_names = {
        idx['name']: idx['column_names']
        for idx in insp.get_indexes(table_name)
    }

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            stale_cols = [table.c[c] for c in existing_index_names[name]]
            Index(name, *stale_cols,
                  unique=index["unique"]).drop(migrate_engine)

        fresh_cols = [table.c[c] for c in index['column_names']]
        Index(index["name"], *fresh_cols,
              unique=index["unique"]).create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Convert the `deleted` column from the id's type to Boolean.

    Rows where deleted == id are considered deleted and become True.
    Sqlite cannot alter columns in place, so it is handled by a separate
    table-rebuild implementation.

    :param migrate_engine: sqlalchemy engine
    :param table_name: table whose `deleted` column is converted
    :param col_name_col_instance: column_name=Column pairs for sqlite
                                  columns with types sqlalchemy cannot
                                  reflect (only used on the sqlite path)
    """
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    # capture index definitions before dropping/renaming columns
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    # deleted == id marks a soft-deleted row under the id-typed scheme
    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """Sqlite variant of change_deleted_column_type_to_boolean.

    Sqlite cannot alter a column type in place, so the table is rebuilt:
    a `<table>__tmp__` copy is created with a Boolean `deleted` column,
    data is copied over (deleted = (deleted == id)), the old table is
    dropped and the copy renamed back.
    """
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    # build the column list for the replacement table
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # sqlalchemy couldn't reflect the type; use the
                # caller-supplied Column instead
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    # capture index definitions; they are recreated after the data copy
    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    # copy all columns as-is, except `deleted`, which is copied as the
    # boolean expression (deleted == id)
    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    # normalize: any row where deleted == id becomes True
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Convert the `deleted` column from Boolean to the id column's type.

    Deleted rows get deleted = id (preserving uniqueness among deleted
    rows); live rows get the type's default "not deleted" value. Sqlite
    is handled by a separate table-rebuild implementation.

    :param migrate_engine: sqlalchemy engine
    :param table_name: table whose `deleted` column is converted
    :param col_name_col_instance: column_name=Column pairs for sqlite
                                  columns with types sqlalchemy cannot
                                  reflect (only used on the sqlite path)
    """
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    # capture index definitions before dropping/renaming columns
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)
    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """Sqlite variant of change_deleted_column_type_to_id_type."""
    # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                 and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    # build the column list for the replacement table
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # sqlalchemy couldn't reflect the type; use the
                # caller-supplied Column instead
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check is CheckConstraint
        #                 associated with deleted column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        # matches both the literal and the bind-parameter rendering of the
        # boolean check constraint on `deleted`
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    # copy every constraint except the boolean checks on `deleted`
    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    # capture index definitions; they are recreated after the data copy
    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    # deleted rows (previously True) get deleted = id ...
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()
def get_connect_string(backend, database, user=None, passwd=None):
    """Build a database connection URL.

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped.

    :param backend: backend name (e.g. 'sqlite', 'mysql')
    :param database: database name or sqlite file path
    :param user: username (ignored for sqlite)
    :param passwd: password (ignored for sqlite)
    :returns: connection URL string
    """
    if backend == 'sqlite':
        template = '%(backend)s:///%(database)s'
    else:
        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
    return template % {'backend': backend,
                       'user': user,
                       'passwd': passwd,
                       'database': database}
def is_backend_avail(backend, database, user=None, passwd=None):
    """Return True iff a connection to the given backend can be opened.

    Used to decide whether backend-specific tests should run.
    """
    try:
        connect_uri = get_connect_string(backend=backend,
                                         database=database,
                                         user=user,
                                         passwd=passwd)
        engine = sqlalchemy.create_engine(connect_uri)
        connection = engine.connect()
    except Exception:
        # intentionally catch all to handle exceptions even if we don't
        # have any backend code loaded.
        return False
    connection.close()
    engine.dispose()
    return True
def get_db_connection_info(conn_pieces):
    """Split parsed connection-URL pieces into credentials and location.

    :param conn_pieces: result of urlparse() on a connection URL
    :returns: tuple of (user, password, database, host)
    """
    loc_pieces = conn_pieces.netloc.split('@')
    host = loc_pieces[1]

    auth_pieces = loc_pieces[0].split(':')
    user = auth_pieces[0]
    if len(auth_pieces) > 1:
        password = auth_pieces[1].strip()
    else:
        password = ""

    database = conn_pieces.path.strip('/')
    return (user, password, database, host)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Dtypes that every parameterized test below sweeps over.
_DATA_TYPES = [dtypes.half, dtypes.float32]

# Hyperparameter combinations exercised by testDense / testSparse.
_TEST_PARAM_VALUES = [
    # learning_rate, rho, momentum, epsilon, centered, use_resource
    [0.5, 0.9, 0.0, 1.0, True, False],
    [0.5, 0.9, 0.0, 1.0, False, False],
    [0.5, 0.9, 0.0, 1.0, True, True],
    [0.5, 0.9, 0.0, 1.0, False, True],
    [0.1, 0.9, 0.0, 1.0, True, False],
    [0.5, 0.95, 0.0, 1.0, False, False],
    [0.5, 0.8, 0.0, 1e-3, True, False],
    [0.5, 0.8, 0.9, 1e-3, True, False],
]
class RMSPropOptimizerTest(test.TestCase, parameterized.TestCase):
  """Tests the Keras v2 RMSProp optimizer against a numpy reference."""

  def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
                            centered):
    """One dense RMSProp step computed in numpy.

    Returns the updated (var, mg, rms, mom); mg only advances when
    `centered` is True.
    """
    rms_t = rms * rho + (1 - rho) * g * g
    if centered:
      mg_t = mg * rho + (1 - rho) * g
      denom_t = rms_t - mg_t * mg_t
    else:
      mg_t = mg
      denom_t = rms_t
    # keep the sqrt in the input dtype so half-precision matches the op
    mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
    var_t = var - mom_t
    return var_t, mg_t, rms_t, mom_t

  def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
                                   lr, rho, momentum, centered):
    """One sparse RMSProp step in numpy; only rows in `gindexs` change."""
    mg_t = copy.deepcopy(mg)
    rms_t = copy.deepcopy(rms)
    mom_t = copy.deepcopy(mom)
    var_t = copy.deepcopy(var)
    for i in range(len(gindexs)):
      gindex = gindexs[i]
      gvalue = gvalues[i]
      rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
      denom_t = rms_t[gindex]
      if centered:
        mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
        denom_t -= mg_t[gindex] * mg_t[gindex]
      mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
      var_t[gindex] = var[gindex] - mom_t[gindex]
    return var_t, mg_t, rms_t, mom_t

  @parameterized.named_parameters(
      *test_util.generate_combinations_with_testcase_name(
          dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
  def testDense(self, dtype, param_value):
    """Dense updates match the numpy reference over 4 steps."""
    (learning_rate, rho, momentum, epsilon, centered,
     use_resource) = tuple(param_value)
    with self.test_session(use_gpu=True):
      # Initialize variables for numpy implementation.
      var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
      grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
      var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
      grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)

      if use_resource:
        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
      else:
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
      grads0 = constant_op.constant(grads0_np)
      grads1 = constant_op.constant(grads1_np)
      opt = rmsprop.RMSProp(
          learning_rate=learning_rate,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()

      # the "mg" slot should only exist for the centered variant
      mg0 = opt.get_slot(var0, "mg")
      self.assertEqual(mg0 is not None, centered)
      mg1 = opt.get_slot(var1, "mg")
      self.assertEqual(mg1 is not None, centered)
      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      mom0 = opt.get_slot(var0, "momentum")
      self.assertIsNotNone(mom0)
      mom1 = opt.get_slot(var1, "momentum")
      self.assertIsNotNone(mom1)

      mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      # numpy mirror starts rms at epsilon, matching the op's slot init
      rms0_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
      rms1_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
      mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())

      # Run 4 steps of RMSProp
      for _ in range(4):
        update.run()

        var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
            var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
            momentum, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
            var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
            momentum, centered)

        # Validate updated params
        if centered:
          self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
          self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
        self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
        self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
        self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
        self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
        # half precision needs looser tolerances on the variables
        self.assertAllCloseAccordingToType(
            var0_np, var0.eval(), half_rtol=0.01, half_atol=0.01)
        self.assertAllCloseAccordingToType(
            var1_np, var1.eval(), half_rtol=0.01, half_atol=0.01)

  @parameterized.parameters([dtypes.float32, dtypes.float64])
  def testMinimizeSparseResourceVariable(self, dtype):
    """minimize() works through embedding_lookup on a resource variable."""
    with self.cached_session():
      var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
      x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
      pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
      loss = pred * pred
      sgd_op = rmsprop.RMSProp(
          learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,
          centered=False).minimize(loss)
      variables.global_variables_initializer().run()
      # Fetch params to validate initial values
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      # Run 1 step of sgd
      sgd_op.run()
      # Validate updated params
      self.assertAllCloseAccordingToType(
          [[0., 1.]], var0.eval(), atol=0.01)

  @parameterized.parameters([dtypes.float32, dtypes.float64])
  def testMinimizeSparseResourceVariableCentered(self, dtype):
    """Centered variant of the sparse-resource minimize test."""
    with self.cached_session():
      var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
      x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
      pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
      loss = pred * pred
      sgd_op = rmsprop.RMSProp(
          learning_rate=1.0, rho=0.1, momentum=0.0, epsilon=1.0,
          centered=True).minimize(loss)
      variables.global_variables_initializer().run()
      # Fetch params to validate initial values
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      # Run 1 step of sgd
      sgd_op.run()
      # Validate updated params
      self.assertAllCloseAccordingToType(
          [[-7/3.0, -4/3.0]], var0.eval(), atol=0.01)

  @parameterized.named_parameters(
      *test_util.generate_combinations_with_testcase_name(
          dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
  def testSparse(self, dtype, param_value):
    """Sparse (IndexedSlices) updates match the numpy reference."""
    (learning_rate, rho, momentum, epsilon, centered, _) = tuple(param_value)
    with self.test_session(use_gpu=True):
      # Initialize variables for numpy implementation.
      var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
      grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
      var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
      grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)

      var0 = variables.Variable(var0_np)
      var1 = variables.Variable(var1_np)
      # each gradient touches a single (different) row of its variable
      grads0_np_indices = np.array([0], dtype=np.int32)
      grads0 = ops.IndexedSlices(
          constant_op.constant(grads0_np),
          constant_op.constant(grads0_np_indices), constant_op.constant([1]))
      grads1_np_indices = np.array([1], dtype=np.int32)
      grads1 = ops.IndexedSlices(
          constant_op.constant(grads1_np),
          constant_op.constant(grads1_np_indices), constant_op.constant([1]))
      opt = rmsprop.RMSProp(
          learning_rate=learning_rate,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()

      # the "mg" slot should only exist for the centered variant
      mg0 = opt.get_slot(var0, "mg")
      self.assertEqual(mg0 is not None, centered)
      mg1 = opt.get_slot(var1, "mg")
      self.assertEqual(mg1 is not None, centered)
      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      mom0 = opt.get_slot(var0, "momentum")
      self.assertIsNotNone(mom0)
      mom1 = opt.get_slot(var1, "momentum")
      self.assertIsNotNone(mom1)

      mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      # numpy mirror starts rms at epsilon, matching the op's slot init
      rms0_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
      rms1_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
      mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
      mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())

      # Run 4 steps of RMSProp
      for _ in range(4):
        update.run()

        var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
            var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
            learning_rate, rho, momentum, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
            var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
            learning_rate, rho, momentum, centered)

        # Validate updated params
        if centered:
          self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
          self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
        self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
        self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
        self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
        self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
        self.assertAllCloseAccordingToType(var0_np, var0.eval())
        self.assertAllCloseAccordingToType(var1_np, var1.eval())

  @parameterized.parameters(_DATA_TYPES)
  def testWithoutMomentum(self, dtype):
    """Hand-computed two-step check with momentum disabled."""
    with self.test_session(use_gpu=True):
      var0 = variables.Variable([1.0, 2.0], dtype=dtype)
      var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
      opt = rmsprop.RMSProp(
          learning_rate=2.0, rho=0.9, momentum=0.0, epsilon=1.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      mom0 = opt.get_slot(var0, "momentum")
      self.assertIsNotNone(mom0)
      mom1 = opt.get_slot(var1, "momentum")
      self.assertIsNotNone(mom1)

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())
      # Step 1: the rms accumulators where 1. So we should see a normal
      # update: v -= grad * learning_rate
      update.run()
      # Check the root mean square accumulators.
      self.assertAllCloseAccordingToType(
          np.array([0.901, 0.901]), rms0.eval())
      self.assertAllCloseAccordingToType(
          np.array([0.90001, 0.90001]), rms1.eval())
      # Check the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.901)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.901))
          ]), var0.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.90001)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.90001))
          ]), var1.eval())
      # Step 2: the root mean square accumulators contain the previous update.
      update.run()
      # Check the rms accumulators.
      self.assertAllCloseAccordingToType(
          np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
      self.assertAllCloseAccordingToType(
          np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
      # Check the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.901)) -
              (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.901)) -
              (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001))
          ]), var0.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.90001)) -
              (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.90001)) -
              (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5))
          ]), var1.eval())

  @parameterized.parameters(_DATA_TYPES)
  def testWithMomentum(self, dtype):
    """Hand-computed two-step check with momentum = 0.5."""
    with self.test_session(use_gpu=True):
      var0 = variables.Variable([1.0, 2.0], dtype=dtype)
      var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)

      opt = rmsprop.RMSProp(
          learning_rate=2.0, rho=0.9, momentum=0.5, epsilon=1.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      mom0 = opt.get_slot(var0, "momentum")
      self.assertIsNotNone(mom0)
      mom1 = opt.get_slot(var1, "momentum")
      self.assertIsNotNone(mom1)

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())
      # Step 1: rms = 1, mom = 0. So we should see a normal
      # update: v -= grad * learning_rate
      update.run()
      # Check the root mean square accumulators.
      self.assertAllCloseAccordingToType(
          np.array([0.901, 0.901]), rms0.eval())
      self.assertAllCloseAccordingToType(
          np.array([0.90001, 0.90001]), rms1.eval())
      # Check the momentum accumulators
      self.assertAllCloseAccordingToType(
          np.array([(0.1 * 2.0 / math.sqrt(0.901)),
                    (0.1 * 2.0 / math.sqrt(0.901))]), mom0.eval())
      self.assertAllCloseAccordingToType(
          np.array([(0.01 * 2.0 / math.sqrt(0.90001)),
                    (0.01 * 2.0 / math.sqrt(0.90001))]), mom1.eval())

      # Check that the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.901)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.901))
          ]), var0.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.90001)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.90001))
          ]), var1.eval())

      # Step 2: the root mean square accumulators contain the previous update.
      update.run()
      # Check the rms accumulators.
      self.assertAllCloseAccordingToType(
          np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
      self.assertAllCloseAccordingToType(
          np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              0.5 * (0.1 * 2.0 / math.sqrt(0.901)) +
              (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001)),
              0.5 * (0.1 * 2.0 / math.sqrt(0.901)) +
              (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001))
          ]), mom0.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              0.5 * (0.01 * 2.0 / math.sqrt(0.90001)) +
              (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5)),
              0.5 * (0.01 * 2.0 / math.sqrt(0.90001)) +
              (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5))
          ]), mom1.eval())

      # Check the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.901)) -
              (0.5 * (0.1 * 2.0 / math.sqrt(0.901)) +
               (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001))),
              2.0 - (0.1 * 2.0 / math.sqrt(0.901)) -
              (0.5 * (0.1 * 2.0 / math.sqrt(0.901)) +
               (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001)))
          ]), var0.eval())
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.90001)) -
              (0.5 * (0.01 * 2.0 / math.sqrt(0.90001)) +
               (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5))),
              4.0 - (0.01 * 2.0 / math.sqrt(0.90001)) -
              (0.5 * (0.01 * 2.0 / math.sqrt(0.90001)) +
               (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5)))
          ]), var1.eval())
if __name__ == "__main__":
test.main()
| |
# Copyright (c) 2020, 2021 The Linux Foundation
#
# SPDX-License-Identifier: Apache-2.0
import hashlib
import os
import re
from west import log
from zspdx.licenses import LICENSES
from zspdx.util import getHashes
# ScannerConfig contains settings used to configure how the SPDX
# Document scanning should occur.
class ScannerConfig:
    """Settings that control how SPDX Document scanning is performed."""

    def __init__(self):
        super(ScannerConfig, self).__init__()

        # auto-conclude each Package's license from its Files' licenses?
        self.shouldConcludePackageLicense = True

        # auto-conclude each File's license from its detected license(s)?
        self.shouldConcludeFileLicenses = True

        # how many lines to scan for SPDX-License-Identifier (0 = all lines)
        self.numLinesScanned = 20

        # calculate SHA256 hashes for each Package's Files?
        # (SHA1 hashes are always calculated; mandatory per SPDX 2.2)
        self.doSHA256 = True

        # calculate MD5 hashes for each Package's Files?
        self.doMD5 = False
def parseLineForExpression(line):
    """Return parsed SPDX expression if tag found in line, or None otherwise."""
    _before, _tag, after = line.partition("SPDX-License-Identifier:")
    if after == "":
        # tag absent, or nothing follows it
        return None
    # strip whitespace, then any trailing comment markers (e.g. "*/"),
    # then whitespace again
    return after.strip().rstrip("/*").strip()
def getExpressionData(filePath, numLines):
    """
    Scan the specified file for the first SPDX-License-Identifier:
    tag in the file.

    Arguments:
        - filePath: path to file to scan.
        - numLines: number of lines to scan for an expression before
                    giving up. If 0, will scan the entire file.
    Returns: parsed expression if found; None if not found.
    """
    log.dbg(f"  - getting licenses for {filePath}")

    with open(filePath, "r") as f:
        try:
            for lineno, line in enumerate(f, start=1):
                if 0 < numLines < lineno:
                    break
                expression = parseLineForExpression(line)
                if expression is not None:
                    return expression
        except UnicodeDecodeError:
            # file contains invalid UTF-8; treat as "no expression found"
            return None

    # scanned everything we were asked to without finding a tag
    return None
def splitExpression(expression):
    """
    Parse a license expression into its constituent identifiers.

    Arguments:
        - expression: SPDX license expression
    Returns: sorted list of the individual license identifiers
    """
    # drop grouping parens and the "or later" plus suffix
    no_parens = re.sub(r'\(|\)|\+', "", expression, flags=re.IGNORECASE)
    # collapse the word operators (ignoring case) into single spaces
    no_operators = re.sub(r' AND | OR | WITH ', " ", no_parens,
                          flags=re.IGNORECASE)
    return sorted(no_operators.split(" "))
def calculateVerificationCode(pkg):
    """
    Calculate the SPDX Package Verification Code for all files in the package.

    Per the SPDX spec, this is the SHA1 of the concatenation of the
    files' SHA1 hashes, sorted lexicographically.

    Arguments:
        - pkg: Package
    Returns: verification code as hex string
    """
    sha1s = sorted(f.sha1 for f in pkg.files.values())
    digest = hashlib.sha1()
    digest.update("".join(sha1s).encode('utf-8'))
    return digest.hexdigest()
def checkLicenseValid(lic, doc):
    """
    Check whether this license ID is a valid SPDX license ID, and add it
    to the custom license IDs set for this Document if it isn't.

    Arguments:
        - lic: detected license ID
        - doc: Document
    """
    if lic in LICENSES:
        return
    doc.customLicenseIDs.add(lic)
def getPackageLicenses(pkg):
    """
    Extract lists of all concluded and infoInFile licenses seen.

    Arguments:
        - pkg: Package
    Returns: sorted list of concluded license exprs,
             sorted list of infoInFile ID's
    """
    licsConcluded = {f.concludedLicense for f in pkg.files.values()}
    licsFromFiles = {
        licInfo
        for f in pkg.files.values()
        for licInfo in f.licenseInfoInFile
    }
    return sorted(licsConcluded), sorted(licsFromFiles)
def normalizeExpression(licsConcluded):
    """
    Combine array of license expressions into one AND'd expression,
    adding parens where needed.

    Arguments:
        - licsConcluded: array of license expressions
    Returns: string with single AND'd expression.
    """
    # return appropriate for simple cases
    if len(licsConcluded) == 0:
        return "NOASSERTION"
    if len(licsConcluded) == 1:
        return licsConcluded[0]

    # more than one, so we'll need to combine them
    # iff an expression has spaces, it needs parens
    revised = []
    for lic in licsConcluded:
        if lic in ["NONE", "NOASSERTION"]:
            continue
        if " " in lic:
            revised.append(f"({lic})")
        else:
            revised.append(lic)

    # BUG FIX: if every entry was NONE / NOASSERTION, the join below used
    # to return "" (not a valid SPDX expression); fall back to NOASSERTION.
    if not revised:
        return "NOASSERTION"
    return " AND ".join(revised)
def scanDocument(cfg, doc):
    """
    Scan for licenses and calculate hashes for all Files and Packages
    in this Document.

    Arguments:
        - cfg: ScannerConfig
        - doc: Document
    """
    for pkg in doc.pkgs.values():
        log.inf(f"scanning files in package {pkg.cfg.name} in document {doc.cfg.name}")

        # first, gather File data for this package
        for f in pkg.files.values():
            # set relpath based on package's relativeBaseDir
            f.relpath = os.path.relpath(f.abspath, pkg.cfg.relativeBaseDir)

            # get hashes for file
            hashes = getHashes(f.abspath)
            if not hashes:
                # BUG FIX: was a plain string, so "{f.abspath}" was logged
                # literally; now an f-string like the log calls above.
                log.wrn(f"unable to get hashes for file {f.abspath}; skipping")
                continue
            hSHA1, hSHA256, hMD5 = hashes
            f.sha1 = hSHA1
            if cfg.doSHA256:
                f.sha256 = hSHA256
            if cfg.doMD5:
                f.md5 = hMD5

            # get licenses for file
            expression = getExpressionData(f.abspath, cfg.numLinesScanned)
            if expression:
                if cfg.shouldConcludeFileLicenses:
                    f.concludedLicense = expression
                f.licenseInfoInFile = splitExpression(expression)

            # check if any custom license IDs should be flagged for document
            for lic in f.licenseInfoInFile:
                checkLicenseValid(lic, doc)

        # now, assemble the Package data
        licsConcluded, licsFromFiles = getPackageLicenses(pkg)
        if cfg.shouldConcludePackageLicense:
            pkg.concludedLicense = normalizeExpression(licsConcluded)
        pkg.licenseInfoFromFiles = licsFromFiles
        pkg.verificationCode = calculateVerificationCode(pkg)
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains well known classes.
This files defines well known classes which need extra maintenance including:
- Any
- Duration
- FieldMask
- Struct
- Timestamp
"""
__author__ = 'jieluo@google.com (Jie Luo)'
import calendar
from datetime import datetime
from datetime import timedelta
import six
try:
# Since python 3
import collections.abc as collections_abc
except ImportError:
# Won't work after python 3.8
import collections as collections_abc
from google.protobuf.descriptor import FieldDescriptor
# strptime/strftime pattern for the date-time portion of RFC 3339 strings
# (fraction and timezone are handled separately).
# NOTE(review): the name has a typo ("FOMAT"); kept as-is for compatibility.
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
# Unit-conversion factors shared by the Timestamp and Duration helpers below.
_NANOS_PER_SECOND = 1000000000
_NANOS_PER_MILLISECOND = 1000000
_NANOS_PER_MICROSECOND = 1000
_MILLIS_PER_SECOND = 1000
_MICROS_PER_SECOND = 1000000
_SECONDS_PER_DAY = 24 * 3600
# Bound on proto3 Duration.seconds (approximately +/- 10,000 years).
_DURATION_SECONDS_MAX = 315576000000
class Any(object):
  """Mixin providing helpers for the google.protobuf.Any message type.

  Carries no state of its own; `type_url` and `value` come from the
  generated message class this mixin is injected into.
  """
  __slots__ = ()
  def Pack(self, msg, type_url_prefix='type.googleapis.com/',
           deterministic=None):
    """Packs the specified message into current Any message."""
    full_name = msg.DESCRIPTOR.full_name
    if not type_url_prefix.endswith('/'):
      # Covers both a missing trailing slash and an empty prefix.
      self.type_url = '%s/%s' % (type_url_prefix, full_name)
    else:
      self.type_url = '%s%s' % (type_url_prefix, full_name)
    self.value = msg.SerializeToString(deterministic=deterministic)
  def Unpack(self, msg):
    """Unpacks the current Any message into specified message."""
    if not self.Is(msg.DESCRIPTOR):
      return False
    msg.ParseFromString(self.value)
    return True
  def TypeName(self):
    """Returns the protobuf type name of the inner message."""
    # Only last part is to be used: b/25630112
    _, _, name = self.type_url.rpartition('/')
    return name
  def Is(self, descriptor):
    """Checks if this Any represents the given protobuf type."""
    if '/' not in self.type_url:
      return False
    return self.TypeName() == descriptor.full_name
_EPOCH_DATETIME = datetime.utcfromtimestamp(0)
class Timestamp(object):
  """Class for Timestamp message type.

  Mixin with conversion helpers; the actual `seconds` and `nanos` fields
  are provided by the generated protobuf message class.
  """
  # No instance state of its own; fields live on the generated message.
  __slots__ = ()
  def ToJsonString(self):
    """Converts Timestamp to RFC 3339 date string format.
    Returns:
      A string converted from timestamp. The string is always Z-normalized
      and uses 3, 6 or 9 fractional digits as required to represent the
      exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
    """
    # Normalize so 0 <= nanos < 1s; any borrow/overflow folds into seconds.
    nanos = self.nanos % _NANOS_PER_SECOND
    total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
    seconds = total_sec % _SECONDS_PER_DAY
    days = (total_sec - seconds) // _SECONDS_PER_DAY
    dt = datetime(1970, 1, 1) + timedelta(days, seconds)
    result = dt.isoformat()
    # The modulo tests below use float literals (1e9 etc.); nanos < 1e9 is a
    # whole number that doubles represent exactly, so the comparisons are safe.
    if (nanos % 1e9) == 0:
      # If there are 0 fractional digits, the fractional
      # point '.' should be omitted when serializing.
      return result + 'Z'
    if (nanos % 1e6) == 0:
      # Serialize 3 fractional digits.
      return result + '.%03dZ' % (nanos / 1e6)
    if (nanos % 1e3) == 0:
      # Serialize 6 fractional digits.
      return result + '.%06dZ' % (nanos / 1e3)
    # Serialize 9 fractional digits.
    return result + '.%09dZ' % nanos
  def FromJsonString(self, value):
    """Parse a RFC 3339 date string format to Timestamp.
    Args:
      value: A date string. Any fractional digits (or none) and any offset are
        accepted as long as they fit into nano-seconds precision.
        Example of accepted format: '1972-01-01T10:00:20.021-05:00'
    Raises:
      ValueError: On parsing problems.
    """
    # Locate the start of the timezone designator: 'Z', '+', or '-'.
    # rfind is used for '-' so the date's own hyphens are not mistaken
    # for the sign of the offset.
    timezone_offset = value.find('Z')
    if timezone_offset == -1:
      timezone_offset = value.find('+')
    if timezone_offset == -1:
      timezone_offset = value.rfind('-')
    if timezone_offset == -1:
      raise ValueError(
          'Failed to parse timestamp: missing valid timezone offset.')
    time_value = value[0:timezone_offset]
    # Parse datetime and nanos.
    point_position = time_value.find('.')
    if point_position == -1:
      second_value = time_value
      nano_value = ''
    else:
      second_value = time_value[:point_position]
      nano_value = time_value[point_position + 1:]
    if 't' in second_value:
      raise ValueError(
          'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', '
          'lowercase \'t\' is not accepted'.format(second_value))
    date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
    td = date_object - datetime(1970, 1, 1)
    seconds = td.seconds + td.days * _SECONDS_PER_DAY
    if len(nano_value) > 9:
      raise ValueError(
          'Failed to parse Timestamp: nanos {0} more than '
          '9 fractional digits.'.format(nano_value))
    if nano_value:
      # Rescale the decimal fraction to integer nanoseconds.
      nanos = round(float('0.' + nano_value) * 1e9)
    else:
      nanos = 0
    # Parse timezone offsets.
    if value[timezone_offset] == 'Z':
      if len(value) != timezone_offset + 1:
        raise ValueError('Failed to parse timestamp: invalid trailing'
                         ' data {0}.'.format(value))
    else:
      timezone = value[timezone_offset:]
      pos = timezone.find(':')
      if pos == -1:
        raise ValueError(
            'Invalid timezone offset value: {0}.'.format(timezone))
      # Convert the HH:MM offset to seconds and shift to UTC.
      if timezone[0] == '+':
        seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
      else:
        seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
    # Set seconds and nanos
    self.seconds = int(seconds)
    self.nanos = int(nanos)
  def GetCurrentTime(self):
    """Get the current UTC into Timestamp."""
    self.FromDatetime(datetime.utcnow())
  def ToNanoseconds(self):
    """Converts Timestamp to nanoseconds since epoch."""
    return self.seconds * _NANOS_PER_SECOND + self.nanos
  def ToMicroseconds(self):
    """Converts Timestamp to microseconds since epoch."""
    return (self.seconds * _MICROS_PER_SECOND +
            self.nanos // _NANOS_PER_MICROSECOND)
  def ToMilliseconds(self):
    """Converts Timestamp to milliseconds since epoch."""
    return (self.seconds * _MILLIS_PER_SECOND +
            self.nanos // _NANOS_PER_MILLISECOND)
  def ToSeconds(self):
    """Converts Timestamp to seconds since epoch."""
    return self.seconds
  def FromNanoseconds(self, nanos):
    """Converts nanoseconds since epoch to Timestamp."""
    self.seconds = nanos // _NANOS_PER_SECOND
    self.nanos = nanos % _NANOS_PER_SECOND
  def FromMicroseconds(self, micros):
    """Converts microseconds since epoch to Timestamp."""
    self.seconds = micros // _MICROS_PER_SECOND
    self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND
  def FromMilliseconds(self, millis):
    """Converts milliseconds since epoch to Timestamp."""
    self.seconds = millis // _MILLIS_PER_SECOND
    self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND
  def FromSeconds(self, seconds):
    """Converts seconds since epoch to Timestamp."""
    self.seconds = seconds
    self.nanos = 0
  def ToDatetime(self):
    """Converts Timestamp to a naive datetime (sub-microsecond truncated)."""
    return _EPOCH_DATETIME + timedelta(
        seconds=self.seconds, microseconds=_RoundTowardZero(
            self.nanos, _NANOS_PER_MICROSECOND))
  def FromDatetime(self, dt):
    """Converts datetime to Timestamp."""
    # Using this guide: http://wiki.python.org/moin/WorkingWithTime
    # And this conversion guide: http://docs.python.org/library/time.html
    # Turn the date parameter into a tuple (struct_time) that can then be
    # manipulated into a long value of seconds. During the conversion from
    # struct_time to long, the source date in UTC, and so it follows that the
    # correct transformation is calendar.timegm()
    self.seconds = calendar.timegm(dt.utctimetuple())
    self.nanos = dt.microsecond * _NANOS_PER_MICROSECOND
class Duration(object):
  """Class for Duration message type.

  Mixin with conversion helpers; the actual `seconds` and `nanos` fields
  are provided by the generated protobuf message class.
  """
  # No instance state of its own; fields live on the generated message.
  __slots__ = ()
  def ToJsonString(self):
    """Converts Duration to string format.
    Returns:
      A string converted from self. The string format will contains
      3, 6, or 9 fractional digits depending on the precision required to
      represent the exact Duration value. For example: "1s", "1.010s",
      "1.000000100s", "-3.100s"
    """
    _CheckDurationValid(self.seconds, self.nanos)
    if self.seconds < 0 or self.nanos < 0:
      # Emit a single leading '-' and continue with the magnitudes.
      result = '-'
      seconds = - self.seconds + int((0 - self.nanos) // 1e9)
      nanos = (0 - self.nanos) % 1e9
    else:
      result = ''
      seconds = self.seconds + int(self.nanos // 1e9)
      nanos = self.nanos % 1e9
    result += '%d' % seconds
    # `nanos` is a float here (modulo against 1e9); the values involved are
    # whole numbers below 1e9, which doubles represent exactly.
    if (nanos % 1e9) == 0:
      # If there are 0 fractional digits, the fractional
      # point '.' should be omitted when serializing.
      return result + 's'
    if (nanos % 1e6) == 0:
      # Serialize 3 fractional digits.
      return result + '.%03ds' % (nanos / 1e6)
    if (nanos % 1e3) == 0:
      # Serialize 6 fractional digits.
      return result + '.%06ds' % (nanos / 1e3)
    # Serialize 9 fractional digits.
    return result + '.%09ds' % nanos
  def FromJsonString(self, value):
    """Converts a string to Duration.
    Args:
      value: A string to be converted. The string must end with 's'. Any
        fractional digits (or none) are accepted as long as they fit into
        precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s".
    Raises:
      ValueError: On parsing problems.
    """
    if len(value) < 1 or value[-1] != 's':
      raise ValueError(
          'Duration must end with letter "s": {0}.'.format(value))
    try:
      pos = value.find('.')
      if pos == -1:
        seconds = int(value[:-1])
        nanos = 0
      else:
        seconds = int(value[:pos])
        # Keep the fraction's sign consistent with the integer part so
        # "-3.100s" yields seconds=-3, nanos=-100000000.
        if value[0] == '-':
          nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9))
        else:
          nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9))
      _CheckDurationValid(seconds, nanos)
      self.seconds = seconds
      self.nanos = nanos
    except ValueError as e:
      raise ValueError(
          'Couldn\'t parse duration: {0} : {1}.'.format(value, e))
  def ToNanoseconds(self):
    """Converts a Duration to nanoseconds."""
    return self.seconds * _NANOS_PER_SECOND + self.nanos
  def ToMicroseconds(self):
    """Converts a Duration to microseconds (truncated toward zero)."""
    micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
    return self.seconds * _MICROS_PER_SECOND + micros
  def ToMilliseconds(self):
    """Converts a Duration to milliseconds (truncated toward zero)."""
    millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
    return self.seconds * _MILLIS_PER_SECOND + millis
  def ToSeconds(self):
    """Converts a Duration to seconds."""
    return self.seconds
  def FromNanoseconds(self, nanos):
    """Converts nanoseconds to Duration."""
    self._NormalizeDuration(nanos // _NANOS_PER_SECOND,
                            nanos % _NANOS_PER_SECOND)
  def FromMicroseconds(self, micros):
    """Converts microseconds to Duration."""
    self._NormalizeDuration(
        micros // _MICROS_PER_SECOND,
        (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND)
  def FromMilliseconds(self, millis):
    """Converts milliseconds to Duration."""
    self._NormalizeDuration(
        millis // _MILLIS_PER_SECOND,
        (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND)
  def FromSeconds(self, seconds):
    """Converts seconds to Duration."""
    self.seconds = seconds
    self.nanos = 0
  def ToTimedelta(self):
    """Converts Duration to timedelta (sub-microsecond truncated)."""
    return timedelta(
        seconds=self.seconds, microseconds=_RoundTowardZero(
            self.nanos, _NANOS_PER_MICROSECOND))
  def FromTimedelta(self, td):
    """Converts timedelta to Duration."""
    self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,
                            td.microseconds * _NANOS_PER_MICROSECOND)
  def _NormalizeDuration(self, seconds, nanos):
    """Set Duration by seconds and nanos."""
    # Force nanos to be negative if the duration is negative.
    if seconds < 0 and nanos > 0:
      seconds += 1
      nanos -= _NANOS_PER_SECOND
    self.seconds = seconds
    self.nanos = nanos
def _CheckDurationValid(seconds, nanos):
  """Raises ValueError unless (seconds, nanos) form a valid proto3 Duration."""
  if not -_DURATION_SECONDS_MAX <= seconds <= _DURATION_SECONDS_MAX:
    raise ValueError(
        'Duration is not valid: Seconds {0} must be in range '
        '[-315576000000, 315576000000].'.format(seconds))
  if not -_NANOS_PER_SECOND < nanos < _NANOS_PER_SECOND:
    raise ValueError(
        'Duration is not valid: Nanos {0} must be in range '
        '[-999999999, 999999999].'.format(nanos))
  # Both components must carry the same sign (or be zero).
  if (seconds > 0 and nanos < 0) or (seconds < 0 and nanos > 0):
    raise ValueError(
        'Duration is not valid: Sign mismatch.')
def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languages, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result
class FieldMask(object):
  """Mixin providing helpers for the google.protobuf.FieldMask type.

  Carries no state of its own; the `paths` field and `Clear()` come from
  the generated message class this mixin is injected into.
  """
  __slots__ = ()
  def ToJsonString(self):
    """Converts FieldMask to string according to proto3 JSON spec."""
    return ','.join(_SnakeCaseToCamelCase(path) for path in self.paths)
  def FromJsonString(self, value):
    """Converts string to FieldMask according to proto3 JSON spec."""
    self.Clear()
    if not value:
      return
    for path in value.split(','):
      self.paths.append(_CamelCaseToSnakeCase(path))
  def IsValidForDescriptor(self, message_descriptor):
    """Checks whether the FieldMask is valid for Message Descriptor."""
    return all(_IsValidPath(message_descriptor, path)
               for path in self.paths)
  def AllFieldsFromDescriptor(self, message_descriptor):
    """Gets all direct fields of Message Descriptor to FieldMask."""
    self.Clear()
    for field_descriptor in message_descriptor.fields:
      self.paths.append(field_descriptor.name)
  def CanonicalFormFromMask(self, mask):
    """Converts a FieldMask to the canonical form.

    Removes paths that are covered by another path (e.g. "foo.bar" is
    dropped when "foo" is also present) and sorts the remaining paths
    alphabetically.
    Args:
      mask: The original FieldMask to be converted.
    """
    _FieldMaskTree(mask).ToFieldMask(self)
  def Union(self, mask1, mask2):
    """Merges mask1 and mask2 into this FieldMask."""
    _CheckFieldMaskMessage(mask1)
    _CheckFieldMaskMessage(mask2)
    merged = _FieldMaskTree(mask1)
    merged.MergeFromFieldMask(mask2)
    merged.ToFieldMask(self)
  def Intersect(self, mask1, mask2):
    """Intersects mask1 and mask2 into this FieldMask."""
    _CheckFieldMaskMessage(mask1)
    _CheckFieldMaskMessage(mask2)
    base = _FieldMaskTree(mask1)
    result = _FieldMaskTree()
    for path in mask2.paths:
      base.IntersectPath(path, result)
    result.ToFieldMask(self)
  def MergeMessage(
      self, source, destination,
      replace_message_field=False, replace_repeated_field=False):
    """Merges fields specified in FieldMask from source to destination.

    Args:
      source: Source message.
      destination: The destination message to be merged into.
      replace_message_field: Replace message field if True. Merge message
        field if False.
      replace_repeated_field: Replace repeated field if True. Append
        elements of repeated field if False.
    """
    _FieldMaskTree(self).MergeMessage(
        source, destination, replace_message_field, replace_repeated_field)
def _IsValidPath(message_descriptor, path):
"""Checks whether the path is valid for Message Descriptor."""
parts = path.split('.')
last = parts.pop()
for name in parts:
field = message_descriptor.fields_by_name.get(name)
if (field is None or
field.label == FieldDescriptor.LABEL_REPEATED or
field.type != FieldDescriptor.TYPE_MESSAGE):
return False
message_descriptor = field.message_type
return last in message_descriptor.fields_by_name
def _CheckFieldMaskMessage(message):
"""Raises ValueError if message is not a FieldMask."""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.name != 'FieldMask' or
message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
raise ValueError('Message {0} is not a FieldMask.'.format(
message_descriptor.full_name))
def _SnakeCaseToCamelCase(path_name):
"""Converts a path name from snake_case to camelCase."""
result = []
after_underscore = False
for c in path_name:
if c.isupper():
raise ValueError(
'Fail to print FieldMask to Json string: Path name '
'{0} must not contain uppercase letters.'.format(path_name))
if after_underscore:
if c.islower():
result.append(c.upper())
after_underscore = False
else:
raise ValueError(
'Fail to print FieldMask to Json string: The '
'character after a "_" must be a lowercase letter '
'in path name {0}.'.format(path_name))
elif c == '_':
after_underscore = True
else:
result += c
if after_underscore:
raise ValueError('Fail to print FieldMask to Json string: Trailing "_" '
'in path name {0}.'.format(path_name))
return ''.join(result)
def _CamelCaseToSnakeCase(path_name):
"""Converts a field name from camelCase to snake_case."""
result = []
for c in path_name:
if c == '_':
raise ValueError('Fail to parse FieldMask: Path name '
'{0} must not contain "_"s.'.format(path_name))
if c.isupper():
result += '_'
result += c.lower()
else:
result += c
return ''.join(result)
class _FieldMaskTree(object):
"""Represents a FieldMask in a tree structure.
For example, given a FieldMask "foo.bar,foo.baz,bar.baz",
the FieldMaskTree will be:
[_root] -+- foo -+- bar
| |
| +- baz
|
+- bar --- baz
In the tree, each leaf node represents a field path.
"""
__slots__ = ('_root',)
def __init__(self, field_mask=None):
"""Initializes the tree by FieldMask."""
self._root = {}
if field_mask:
self.MergeFromFieldMask(field_mask)
def MergeFromFieldMask(self, field_mask):
"""Merges a FieldMask to the tree."""
for path in field_mask.paths:
self.AddPath(path)
def AddPath(self, path):
"""Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
will be turned into a leaf node with all its children removed because
the path matches all the node's children. Otherwise, a new path will
be added.
Args:
path: The field path to add.
"""
node = self._root
for name in path.split('.'):
if name not in node:
node[name] = {}
elif not node[name]:
# Pre-existing empty node implies we already have this entire tree.
return
node = node[name]
# Remove any sub-trees we might have had.
node.clear()
def ToFieldMask(self, field_mask):
"""Converts the tree to a FieldMask."""
field_mask.Clear()
_AddFieldPaths(self._root, '', field_mask)
def IntersectPath(self, path, intersection):
"""Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculates.
intersection: The out tree to record the intersection part.
"""
node = self._root
for name in path.split('.'):
if name not in node:
return
elif not node[name]:
intersection.AddPath(path)
return
node = node[name]
intersection.AddLeafNodes(path, node)
def AddLeafNodes(self, prefix, node):
"""Adds leaf nodes begin with prefix to this tree."""
if not node:
self.AddPath(prefix)
for name in node:
child_path = prefix + '.' + name
self.AddLeafNodes(child_path, node[name])
def MergeMessage(
self, source, destination,
replace_message, replace_repeated):
"""Merge all fields specified by this tree from source to destination."""
_MergeMessage(
self._root, source, destination, replace_message, replace_repeated)
def _StrConvert(value):
"""Converts value to str if it is not."""
# This file is imported by c extension and some methods like ClearField
# requires string for the field name. py2/py3 has different text
# type and may use unicode.
if not isinstance(value, str):
return value.encode('utf-8')
return value
def _MergeMessage(
    node, source, destination, replace_message, replace_repeated):
  """Merge all fields specified by a sub-tree from source to destination.

  Args:
    node: dict sub-tree of a _FieldMaskTree; an empty child dict means
      "merge the whole field", a non-empty one restricts to sub-fields.
    source: message to read from.
    destination: message of the same type to merge into.
    replace_message: clear singular message fields before merging.
    replace_repeated: clear repeated fields before appending elements.

  Raises:
    ValueError: if a path uses sub-fields on a repeated or non-message
      field.
  """
  source_descriptor = source.DESCRIPTOR
  for name in node:
    child = node[name]
    field = source_descriptor.fields_by_name[name]
    # NOTE(review): fields_by_name[name] raises KeyError for unknown names,
    # so the `field is None` guard below looks unreachable -- confirm
    # whether .get(name) was intended.
    if field is None:
      raise ValueError('Error: Can\'t find field {0} in message {1}.'.format(
          name, source_descriptor.full_name))
    if child:
      # Sub-paths are only allowed for singular message fields.
      if (field.label == FieldDescriptor.LABEL_REPEATED or
          field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE):
        raise ValueError('Error: Field {0} in message {1} is not a singular '
                         'message field and cannot have sub-fields.'.format(
                             name, source_descriptor.full_name))
      # Recurse into the sub-fields only when the source has the field set.
      if source.HasField(name):
        _MergeMessage(
            child, getattr(source, name), getattr(destination, name),
            replace_message, replace_repeated)
      continue
    if field.label == FieldDescriptor.LABEL_REPEATED:
      if replace_repeated:
        destination.ClearField(_StrConvert(name))
      repeated_source = getattr(source, name)
      repeated_destination = getattr(destination, name)
      repeated_destination.MergeFrom(repeated_source)
    else:
      if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
        if replace_message:
          destination.ClearField(_StrConvert(name))
        if source.HasField(name):
          getattr(destination, name).MergeFrom(getattr(source, name))
      else:
        # Scalar field: plain assignment copies the value.
        setattr(destination, name, getattr(source, name))
def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node and prefix:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:
child_path = name
_AddFieldPaths(node[name], child_path, field_mask)
_INT_OR_FLOAT = six.integer_types + (float,)
def _SetStructValue(struct_value, value):
if value is None:
struct_value.null_value = 0
elif isinstance(value, bool):
# Note: this check must come before the number check because in Python
# True and False are also considered numbers.
struct_value.bool_value = value
elif isinstance(value, six.string_types):
struct_value.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
struct_value.number_value = value
elif isinstance(value, (dict, Struct)):
struct_value.struct_value.Clear()
struct_value.struct_value.update(value)
elif isinstance(value, (list, ListValue)):
struct_value.list_value.Clear()
struct_value.list_value.extend(value)
else:
raise ValueError('Unexpected type')
def _GetStructValue(struct_value):
which = struct_value.WhichOneof('kind')
if which == 'struct_value':
return struct_value.struct_value
elif which == 'null_value':
return None
elif which == 'number_value':
return struct_value.number_value
elif which == 'string_value':
return struct_value.string_value
elif which == 'bool_value':
return struct_value.bool_value
elif which == 'list_value':
return struct_value.list_value
elif which is None:
raise ValueError('Value not set')
class Struct(object):
  """Class for Struct message type: a dict-like view over `fields`."""
  __slots__ = ()
  def __getitem__(self, key):
    return _GetStructValue(self.fields[key])
  def __contains__(self, item):
    return item in self.fields
  def __setitem__(self, key, value):
    _SetStructValue(self.fields[key], value)
  def __delitem__(self, key):
    del self.fields[key]
  def __len__(self):
    return len(self.fields)
  def __iter__(self):
    return iter(self.fields)
  def keys(self):  # pylint: disable=invalid-name
    return self.fields.keys()
  def values(self):  # pylint: disable=invalid-name
    return [self[k] for k in self]
  def items(self):  # pylint: disable=invalid-name
    return [(k, self[k]) for k in self]
  def get_or_create_list(self, key):
    """Returns a list for this key, creating it if it didn't exist already."""
    entry = self.fields[key]
    if not entry.HasField('list_value'):
      # Clear will mark list_value modified which will indeed create a list.
      entry.list_value.Clear()
    return entry.list_value
  def get_or_create_struct(self, key):
    """Returns a struct for this key, creating it if it didn't exist already."""
    entry = self.fields[key]
    if not entry.HasField('struct_value'):
      # Clear will mark struct_value modified which will indeed create a struct.
      entry.struct_value.Clear()
    return entry.struct_value
  def update(self, dictionary):  # pylint: disable=invalid-name
    for key, value in dictionary.items():
      self[key] = value
collections_abc.MutableMapping.register(Struct)
class ListValue(object):
  """Class for ListValue message type: a sequence view over `values`."""
  __slots__ = ()
  def __len__(self):
    return len(self.values)
  def append(self, value):
    _SetStructValue(self.values.add(), value)
  def extend(self, elem_seq):
    for elem in elem_seq:
      self.append(elem)
  def __getitem__(self, index):
    """Retrieves item by the specified index."""
    return _GetStructValue(self.values[index])
  def __setitem__(self, index, value):
    _SetStructValue(self.values[index], value)
  def __delitem__(self, key):
    del self.values[key]
  def items(self):
    for index in range(len(self)):
      yield self[index]
  def add_struct(self):
    """Appends and returns a struct value as the next value in the list."""
    nested = self.values.add().struct_value
    # Clear will mark struct_value modified which will indeed create a struct.
    nested.Clear()
    return nested
  def add_list(self):
    """Appends and returns a list value as the next value in the list."""
    nested = self.values.add().list_value
    # Clear will mark list_value modified which will indeed create a list.
    nested.Clear()
    return nested
collections_abc.MutableSequence.register(ListValue)
# Maps full proto names of the well-known types to the mixin base classes
# defined above, to be injected into the generated message classes.
WKTBASES = {
    'google.protobuf.Any': Any,
    'google.protobuf.Duration': Duration,
    'google.protobuf.FieldMask': FieldMask,
    'google.protobuf.ListValue': ListValue,
    'google.protobuf.Struct': Struct,
    'google.protobuf.Timestamp': Timestamp,
}
| |
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import KRHF
from pyscf.pbc.tdscf import KTDHF
from pyscf.pbc.tdscf import krhf_slow_supercell, kproxy_supercell
from pyscf.tdscf.common_slow import eig
from test_common import retrieve_m, assert_vectors_close
import unittest
from numpy import testing
import numpy
def density_fitting_hf(x):
    """
    Constructs density-fitting (Gamma-point) Hartree-Fock objects.
    Args:
        x (Cell): the supercell;
    Returns:
        The DF-HF object.
    """
    mean_field = KRHF(x)
    return mean_field.density_fit()
class DiamondTestGamma(unittest.TestCase):
    """Compare this (supercell proxy) @Gamma vs reference (pyscf)."""
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a small diamond cell, its converged DF-KRHF mean
        # field, and a reference pyscf KTDHF solution to compare against.
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'gth-dzvp'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        # Converge the density-fitted Gamma-point mean field.
        cls.model_krhf = model_krhf = KRHF(cell).density_fit()
        model_krhf.kernel()
        # Reference TDHF calculation from pyscf proper.
        cls.td_model_krhf = td_model_krhf = KTDHF(model_krhf)
        td_model_krhf.nroots = 5
        td_model_krhf.kernel()
        # Reference TDHF matrix extracted from the pyscf object.
        cls.ref_m_krhf = retrieve_m(td_model_krhf)
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_krhf
        del cls.model_krhf
        del cls.cell
    def test_eri(self):
        """Tests all ERI implementations: with and without symmetries."""
        e = kproxy_supercell.PhysERI(self.model_krhf, "hf", [1, 1, 1], density_fitting_hf)
        m = e.tdhf_full_form()
        # The full TDHF matrix must reproduce the pyscf reference exactly.
        testing.assert_allclose(self.ref_m_krhf, m, atol=1e-14)
        vals, vecs = eig(m, nroots=self.td_model_krhf.nroots)
        testing.assert_allclose(vals, self.td_model_krhf.e, atol=1e-5)
    def test_class(self):
        """Tests container behavior."""
        model = kproxy_supercell.TDProxy(self.model_krhf, "hf", [1, 1, 1], density_fitting_hf)
        model.nroots = self.td_model_krhf.nroots
        assert not model.fast
        model.kernel()
        # Excitation energies and amplitudes must match the reference.
        testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-5)
        assert_vectors_close(model.xy, numpy.array(self.td_model_krhf.xy), atol=1e-12)
class DiamondTestSupercell2(unittest.TestCase):
    """Compare this (supercell proxy) @2kp vs reference (krhf_supercell_slow)."""
    # Number of k-points along the first reciprocal axis.
    k = 2
    @classmethod
    def setUpClass(cls):
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'gth-dzvp'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        k = cell.make_kpts([cls.k, 1, 1])
        # K-points
        cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
        model_krhf.conv_tol = 1e-14
        model_krhf.kernel()
        # Add random phases
        # (seeded, so the gauge perturbation is reproducible; the tested
        # quantities must be invariant under these orbital phase changes)
        numpy.random.seed(0)
        for i in model_krhf.mo_coeff:
            i *= numpy.exp(2.j * numpy.pi * numpy.random.rand(i.shape[1]))[numpy.newaxis, :]
        # The slow supercell KTDHF
        cls.td_model_krhf = td_model_krhf = krhf_slow_supercell.TDRHF(model_krhf)
        td_model_krhf.kernel()
        cls.ref_m = td_model_krhf.eri.tdhf_full_form()
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_krhf
        del cls.model_krhf
        del cls.cell
    def test_eri(self):
        """Tests ERI."""
        e = kproxy_supercell.PhysERI(self.model_krhf, "hf", [self.k, 1, 1], density_fitting_hf)
        m = e.tdhf_full_form()
        # Test matrix vs ref
        testing.assert_allclose(m, self.ref_m, atol=1e-11)
        # Test transformations
        # (the supercell rotation and its inverse must compose to identity)
        testing.assert_allclose(
            e.model_super.supercell_rotation.dot(e.model_super.supercell_inv_rotation).toarray(),
            numpy.eye(e.model_super.supercell_rotation.shape[0]),
        )
    def test_class(self):
        """Tests container behavior."""
        model = kproxy_supercell.TDProxy(self.model_krhf, "hf", [self.k, 1, 1], density_fitting_hf)
        model.nroots = self.td_model_krhf.nroots
        assert not model.fast
        model.kernel()
        testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-5)
        # Test real
        testing.assert_allclose(model.e.imag, 0, atol=1e-8)
        nocc = nvirt = 4
        testing.assert_equal(model.xy.shape, (len(model.e), 2, self.k, self.k, nocc, nvirt))
        # Test only non-degenerate roots
        # (degenerate subspaces can mix arbitrarily, so their amplitudes
        # are excluded from the vector comparison)
        d = abs(model.e[1:] - model.e[:-1]) < 1e-8
        d = numpy.logical_or(numpy.concatenate(([False], d)), numpy.concatenate((d, [False])))
        d = numpy.logical_not(d)
        assert_vectors_close(self.td_model_krhf.xy[d], model.xy[d], atol=1e-5)
class DiamondTestSupercell3(DiamondTestSupercell2):
    """Compare this (supercell proxy) @3kp vs reference (krhf_supercell_slow)."""
    # Reuses all fixtures and checks from the 2-k-point test above.
    k = 3
class FrozenTest(unittest.TestCase):
    """Tests frozen behavior."""
    # Number of k-points along the first reciprocal axis.
    k = 2
    @classmethod
    def setUpClass(cls):
        # Shared fixture: a converged DF-KRHF mean field on a 2-k-point mesh.
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'sto-3g'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        k = cell.make_kpts([cls.k, 1, 1])
        # K-points
        cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
        model_krhf.conv_tol = 1e-14
        model_krhf.kernel()
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.model_krhf
        del cls.cell
    def test_eri(self):
        """Tests ERI."""
        # Exercise several frozen-orbital specifications: a count, an
        # explicit index list, and a list with a negative (from-the-end) index.
        for frozen in (1, [0, 1], [0, -1]):
            try:
                e = kproxy_supercell.PhysERI(self.model_krhf, "hf", [self.k, 1, 1], density_fitting_hf, frozen=frozen)
                m = e.tdhf_full_form()
                ref_e = krhf_slow_supercell.PhysERI4(self.model_krhf, frozen=frozen)
                ref_m = ref_e.tdhf_full_form()
                # Test matrix vs ref
                testing.assert_allclose(m, ref_m, atol=1e-11)
            except Exception:
                # Annotate which frozen spec failed before re-raising.
                print("When testing class with frozen={} the following exception occurred:".format(repr(frozen)))
                raise
class FrozenTest3(FrozenTest):
    """Tests frozen behavior K=3."""
    # Same checks as FrozenTest, on a denser 3-k-point mesh.
    k = 3
| |
import os,sys
import numpy as np
from Utility import *
import Errors
import nest
class Models:
    """Container of NEST model/synapse parameter sets.

    Each attribute (Init, Clk, WSN, Clk_Clk, Clk_WSN, Clk_Spk) is a
    TemplateDict built from _ModelTemplate, optionally overridden by
    keyword arguments supplied at construction time.
    """
    # field_name: (default, acceptable types, required length,
    #              content type, conversion type) -- consumed by TemplateDict.
    _ModelTemplate = {
        'Init': {
            'spike_times'   : ( [1.0]  , (list, tuple), 1, float, float ),
            'spike_weights' : ( [10.0] , (list, tuple), 1, float, float )
        },
        'Clk': {
            'C_m'     : ( 1.0  , (int, float), None, None, float ),
            'E_L'     : ( 0.0  , (int, float), None, None, float ),
            'I_e'     : ( 0.0  , (int, float), None, None, float ),
            'V_m'     : ( 0.0  , (int, float), None, None, float ),
            'V_reset' : ( 0.0  , (int, float), None, None, float ),
            'V_th'    : ( 1.0  , (int, float), None, None, float ),
            't_ref'   : ( 20.0 , (int, float), None, None, float ),
            'tau_m'   : ( 1.0  , (int, float), None, None, float )
        },
        'WSN': {
            'C_m'     : ( 1.0  , (int, float), None, None, float ),
            'E_L'     : ( 0.0  , (int, float), None, None, float ),
            'I_e'     : ( 0.0  , (int, float), None, None, float ),
            'Sigma'   : ( ['const', 1.0], (list, tuple, Distribution),
                          None, None, Distribution ),
            'D_Int'   : ( ['const', 50.0], (list, tuple, Distribution),
                          None, None, Distribution ),
            'V_m'     : ( 0.0   , (int, float), None, None, float ),
            'V_reset' : ( -20.0 , (int, float), None, None, float ),
            'V_th'    : ( -1.0  , (int, float), None, None, float ),
            't_ref'   : ( 1.0   , (int, float), None, None, float ),
            'tau_m'   : ( 30.0  , (int, float), None, None, float ),
        },
        'Clk_Clk': {
            'weight' : ( 10.0  , (int, float), None, None, float ),
            'delay'  : ( 100.0 , (int, float), None, None, float )
        },
        'Clk_WSN': {
            'weight' : ( 1.0 , (int, float), None, None, float ),
            'delay'  : ( 1.0 , (int, float), None, None, float )
            # 'receptor_type': (1, int, None, None, None),
        },
        'Clk_Spk': {
            'weight' : ( 1.0 , (int, float), None, None, float ),
            'delay'  : ( 1.0 , (int, float), None, None, float ),
        }
    }
    def __init__(self, **model):
        """Usage: Models(model=<full spec dict>, **per_model_overrides).

        A nested `model` dict, if given, is applied first via a recursive
        call; the remaining keyword arguments then override individual
        models. Unknown model names raise Errors.ModelUndefinedError.
        """
        if 'model' in model:
            self.__init__(**model['model'])
            del model['model']
        # Track the supplied names so unknown ones can be reported below.
        unknown = dict.fromkeys(model)
        # .items() (not the py2-only .iteritems()) so this also runs on py3.
        for key, template in Models._ModelTemplate.items():
            if key in model:
                self.__dict__[key] = TemplateDict(template, **model[key])
                del unknown[key]
            elif key not in self.__dict__:
                # Do not clobber entries installed by the recursive
                # 'model=' pass above; only fill in missing defaults.
                self.__dict__[key] = TemplateDict(template)
        if unknown:
            raise Errors.ModelUndefinedError(', '.join(unknown.keys()))
    def setParam(self, **model):
        """Updates named parameter sets; passing None resets one to defaults.

        Raises:
            Errors.ModelUndefinedError: for unknown model names.
        """
        templates = Models._ModelTemplate
        for key, params in model.items():
            if key not in templates:
                raise Errors.ModelUndefinedError(key)
            if params is None:
                # Reset this model back to its template defaults.
                self.__dict__[key] = TemplateDict(templates[key])
            else:
                self.__dict__[key].setItems(**params)
    def D(self, name):
        """Returns the NEST-compatible parameter dict for model `name`.

        Raises:
            Errors.ParamTypeError: when name is not a string.
            Errors.ModelUndefinedError: when no such model is held.
        """
        if not isinstance(name, str):
            raise Errors.ParamTypeError('name', 'str')
        if name not in self.__dict__:
            raise Errors.ModelUndefinedError(name)
        return self.__dict__[name].genNestDict()
class InputSignal:
    """Validated input signal for the network.

    `t` is a 1-D, uniformly spaced time vector; `I` holds the amplitudes
    (1-D of len(t) for a single channel, otherwise shaped
    I_size + (len(t),)); `I_size` is the 1-D channel-grid shape.  After
    construction `I` is flattened to (prod(I_size), N_sample) and `h`,
    `length`, `freq` describe the sampling (uniform spacing assumed --
    only t[1]-t[0] is inspected).
    """
    def __init__(self,t,I,I_size):
        # t is the time table, I is the signal amplitude
        if not isinstance(t,(list,tuple,np.ndarray)):
            raise Errors.ParamTypeError('t', 'list, tuple or numpy array')
        if not isinstance(I,(list,tuple,np.ndarray)):
            raise Errors.ParamTypeError('I', 'list, tuple or numpy array')
        if not isinstance(I_size,(list,tuple,np.ndarray)):
            raise Errors.ParamTypeError('I_size', 'list, tuple or numpy array')
        # Coerce inputs to float/int arrays (no copy when already ndarray).
        self.t = t if isinstance(t, np.ndarray) else np.array(t,dtype=float)
        self.I = I if isinstance(I, np.ndarray) else np.array(I,dtype=float)
        if not isinstance(I_size,np.ndarray):
            self.I_size = np.array(I_size,dtype=int)
        else:
            self.I_size = I_size
        if self.t.ndim != 1:
            raise Errors.ParamDimError('t', 1)
        self.N_sample = len(self.t)
        if self.I_size.ndim != 1:
            raise Errors.ParamDimError('I_size', 1)
        if np.prod(self.I_size) == 1:
            # Single channel: expect a flat amplitude vector of len(t).
            if self.I.ndim != 1:
                raise Errors.ParamDimError('I', 1)
            if len(self.I) != self.N_sample:
                raise Errors.ParamSizeError('I', [self.N_sample])
        else:
            # Multi-channel: last axis is time, leading axes match I_size.
            if self.I.ndim != len(self.I_size) + 1:
                raise Errors.ParamSizeError('I', len(self.I_size) + 1)
            # BUGFIX: compare as tuples.  The previous
            # `self.I.shape[:-1] != self.I_size` compared a tuple with an
            # ndarray, which broadcasts to a boolean array and is ambiguous
            # (ValueError) when I_size has more than one element.
            if tuple(self.I.shape[:-1]) != tuple(self.I_size):
                # list() instead of deepcopy so tuples/arrays also work in
                # this error path.
                corrSize = list(I_size)
                corrSize.append(self.N_sample)
                raise Errors.ParamSizeError('I', corrSize)
        self.h = t[1]-t[0]               # sample period
        self.length = t[-1]-t[0]+self.h  # total signal duration
        self.freq = 1.0 / self.h         # sampling frequency
        # Flatten channels: one row per (flattened) channel.
        self.I = self.I.reshape( (np.prod(I_size),self.N_sample) )
class WSNnet:
    """Clock-driven network of WSN neurons built on NEST.

    One self-exciting clock neuron ('Clk') paces the network; each input
    channel drives a group of WSN neurons through a step_current_generator.
    Spikes are read back either from memory or from NEST spike files when a
    SaveFile prefix is given.
    """
    # Parameter template:
    # name -> (default, allowed types, size, content type, conversion type)
    _NetParamTemplate = {
        #'name_of_param': (default, (allowed types), size, content, conv_type)
        'sim_h'        : ( 0.01 , (float,int) , None, None , float ),
        'save_h'       : ( 0.01 , (float,int) , None, None , float ),
        'Size_In'      : ( [1]  , (list,tuple) , None, int , None ),
        'N_Sig_Per_Ch' : ( 100  , int , None, None , None ),
        'N_Per_Sig'    : ( 1    , int , None, None , None ),
        'Vth'          : ( -1.0 , (float,int) , None, None , float ),
        'Vc'           : ( -5.0 , (float,int) , None, None , float ),
        'Tau'          : ( 40.0 , (float,int) , None, None , float ),
        'Sigma'        : (['const',1.0], (list,tuple,Distribution)
                          , None , None , Distribution ),
        'T_Clk'        : ( 100.0 , (float,int) , None, None , float ),
        #'T_Enc'       : ( 45.0 , (float,int) , None, None , float ),
        #'D_Enc'       : ( 50.0 , (float,int) , None, None , float ),
        'D_Int'        : (['const',1.0], (list,tuple,Distribution)
                          , None , None , Distribution ),
        'En_Trace'     : ( False , bool , None, None , None ),
        'TraceNodes'   : ( [] , (list,tuple) , None, int , None )
    }
    # NEST kernel / module settings template (same tuple layout as above).
    _NestParamTemplate = {
        #'WSN_Model_Name' : ( 'iaf_freq_sensor_v2', str , None, None, None ),
        'WSN_Model_Name' : ( 'wsn_hermitian_2', str , None, None, None ),
        'MyModule_Name'  : ( 'mymodule' , str , None, None, None ),
        #'Syn_Model_Name' : ( ['CLK_WSN_synapse', 'SRC_WSN_synapse'],
        #list, None, None, None ),
        'verbosity'      : ( 'M_ERROR' , str , None, None, None ),
        'print_time'     : ( True , bool, None, None, None )
    }
    def __init__(self, **kwargs):
        """Validate parameters against the templates and derive dependents."""
        self.NetP = TemplateDict(WSNnet._NetParamTemplate,**kwargs)
        self.NestP = TemplateDict(WSNnet._NestParamTemplate)
        self.Mod = Models()
        # set Vth, Vc, Tau, Sigma, T_Clk, T_Enc, T_Delay if needed
        # update wsn model
        P = self.NetP
        updates = P.genDict()
        if 'save_h' not in kwargs.keys():
            # Default recording step to the simulation step.
            updates['save_h'] = P.sim_h
        if 'Sigma' not in kwargs.keys():
            # NOTE(review): 'T_Enc' is commented out of _NetParamTemplate
            # above, so this default path looks like it would raise
            # AttributeError on P.T_Enc -- confirm whether T_Enc was meant
            # to remain in the template.
            updates['Sigma'] = ['linear',P.sim_h*2.0,P.T_Enc]
        if 'Vc' not in kwargs.keys():
            # NOTE(review): writes 'Vth' (not 'Vc'); setParam() below then
            # derives Vc from Vth/T_Clk/Tau -- presumably intentional.
            updates['Vth'] = P.Vth
        self.setParam(**updates)
    def setParam(self,**param):
        """Update network parameters and push the derived values into the
        WSN / clock NEST model dictionaries."""
        self.NetP.setItems(**param)
        P = self.NetP
        M = self.Mod
        U = {}
        if ('Vc' not in param) and (
            'Vth' in param or 'T_Clk' in param.keys()):
            if P.Vth < 0:
                # Reset potential chosen so the exponential decay reaches
                # Vth after exactly one clock period T_Clk.
                P.Vc = np.asscalar(P.Vth * np.exp(P.T_Clk/P.Tau))
            elif P.Vth > 0:
                P.Vc = 0.0
            else:
                raise Errors.NetParamError("Vth should not equal to zero")
        if 'Sigma' in param or 'N_Sig_Per_Ch' in param:
            # Redraw one sigma per signal group.
            P.Sigma.rand(P.N_Sig_Per_Ch)
            U['Sigma']=P.Sigma
        if 'D_Int' in param or 'N_Per_Sig' in param:
            # Redraw one integration delay per neuron within a group.
            P.D_Int.rand(P.N_Per_Sig)
            U['D_Int'] = P.D_Int
        if 'Vth' in param:
            U['V_th']=P.Vth
        if 'Tau' in param:
            U['tau_m'] = P.Tau
        if ('Vth' in param) or ('T_Clk' in param) or ('Vc' in param):
            U['V_reset'] = P.Vc
        if bool(U):
            M.setParam(WSN=U)
        if 'T_Clk' in param:
            # The clock self-connection delay defines the clock period.
            M.setParam(Clk_Clk = {'delay': P.T_Clk})
    def CreateNet(self,rebuild=True,SaveFile=None):
        """Instantiate and wire all NEST nodes.

        When SaveFile is given, spike/trace recorders write to files with
        that path prefix; otherwise events are kept in memory.  `rebuild`
        resets the NEST kernel before building.
        """
        from copy import deepcopy
        from os import path
        if SaveFile is not None:
            self.SaveFile = path.abspath(SaveFile)
            rec_dict = {
                'to_file' : True,
                'to_memory' : False,
            }
        else:
            self.SaveFile = None
            rec_dict = {
                'to_file' : False,
                'to_memory' : True,
            }
        #setup nest kernel
        nest.set_verbosity(self.NestP.verbosity)
        if self.NestP.WSN_Model_Name not in nest.Models():
            # The custom WSN neuron model lives in an external module.
            nest.Install(self.NestP.MyModule_Name)
        if rebuild:
            nest.ResetKernel()
            nest.SetStatus([0],{'resolution':self.NetP.sim_h})
        Mod = self.Mod
        NetP = self.NetP
        NestP = self.NestP
        #Create Nodes
        NetP.N_Per_Channel = NetP.N_Sig_Per_Ch * NetP.N_Per_Sig
        self.N_In = np.asscalar(np.prod(NetP.Size_In))
        self.WSN_Size = list(deepcopy(NetP.Size_In))
        self.WSN_Size.append(NetP.N_Per_Channel)
        self.N_WSN = self.N_In * NetP.N_Per_Channel
        self.node_init = nest.Create("spike_generator", 1 ,params = Mod.D('Init'))
        self.node_clk = nest.Create("iaf_psc_alpha" , 1 ,params = Mod.D('Clk'))
        self.node_src = nest.Create("step_current_generator", self.N_In)
        self.node_src_grps = np.reshape(self.node_src, NetP.Size_In)
        self.node_wsn = nest.Create(NestP.WSN_Model_Name, self.N_WSN, params = Mod.D('WSN'))
        # Grouped views of the same GIDs: (channel, signal-group, neuron).
        self.node_wsn_chan_grps = np.reshape(self.node_wsn,(self.N_In,NetP.N_Sig_Per_Ch,NetP.N_Per_Sig))
        self.node_wsn_grps = np.reshape(self.node_wsn, self.WSN_Size)
        self.spk_wsn = nest.Create("spike_detector",1,params=rec_dict)
        self.spk_clk = nest.Create("spike_detector",1,params=rec_dict)
        if SaveFile is not None:
            nest.SetStatus(self.spk_wsn,{'label':'_'.join([self.SaveFile,'spk_wsn'])})
            nest.SetStatus(self.spk_clk,{'label':'_'.join([self.SaveFile,'spk_clk'])})
        #Create multimeter nodes for Tracing
        if NetP.En_Trace:
            if not NetP.TraceNodes:
                rec_nodes = self.node_wsn
            else:
                rec_nodes = NetP.TraceNodes
            rec_fields = nest.GetStatus([rec_nodes[0]],"recordables")[0]
            self.rec_wsn = nest.Create("multimeter",len(rec_nodes),params={
                'record_from' : rec_fields,
                'interval' : NetP.save_h
                })
            self.rec_clk = nest.Create("multimeter",1,params={
                'record_from' : ['V_m'],
                'interval' : NetP.save_h
                })
            if SaveFile is not None:
                nest.SetStatus(self.rec_wsn,rec_dict)
                nest.SetStatus(self.rec_clk, rec_dict)
                nest.SetStatus(self.rec_wsn,{'label':'_'.join([self.SaveFile,'rec_wsn'])})
                nest.SetStatus(self.rec_clk,{'label':'_'.join([self.SaveFile,'rec_clk'])})
        #Connect Nodes
        nest.Connect(self.node_init,self.node_clk)
        nest.Connect(self.node_clk,self.node_clk,syn_spec=Mod.D('Clk_Clk'))
        nest.Connect(self.node_clk, self.spk_clk,syn_spec=Mod.D('Clk_Spk'))
        nest.Connect(self.node_wsn, self.spk_wsn,'all_to_all')
        #Connect Clk to WSN
        nest.Connect(self.node_clk, self.node_wsn,'all_to_all',syn_spec=Mod.D('Clk_WSN'))
        #Connect source nodes to WSN nodes and setup the sigmas and d_ints
        for src, wsn_sig_grps in zip(self.node_src, self.node_wsn_chan_grps):
            # setup sigma and d_int
            sigDist = Mod.WSN.Sigma
            delayDist = Mod.WSN.D_Int
            for w_sig, s in zip(wsn_sig_grps, sigDist.data):
                for w,d in zip(w_sig, delayDist.data):
                    nest.SetStatus([w],{'Sigma':s, 'D_Int': d})
            # Connect Source to WSN
            nest.Connect([src],np.ravel(wsn_sig_grps).tolist(),'all_to_all')
        if NetP.En_Trace:
            # Connect multimeter nodes for tracing
            nest.Connect(self.rec_wsn,rec_nodes)
            nest.Connect(self.rec_clk,self.node_clk)
    def Sim(self, t_in, I_in, reset=True):
        """Run the simulation for the duration of the input signal.

        t_in/I_in are validated/reshaped by InputSignal; each flattened
        channel drives one step_current_generator.
        NOTE(review): the `reset` parameter is currently unused.
        """
        if 'node_wsn' not in self.__dict__:
            raise Errors.NetNotInitError()
        NetP = self.NetP
        #self.signal=InputSignal(t_in,I_in,NetP.Size_In)
        #S = self.signal
        S = InputSignal(t_in, I_in, NetP.Size_In)
        nest.set_verbosity(self.NestP.verbosity)
        nest.SetStatus([0],{
            'print_time': self.NestP.print_time,
            'overwrite_files' : True
            })
        # apply signal to source nodes (times shifted to start at 0)
        for s,Is in zip(self.node_src,S.I):
            nest.SetStatus([s],{
                'amplitude_times' : S.t-t_in[0],
                'amplitude_values' : Is
                })
        nest.Simulate(S.length)
    def get_sigmas(self,nodes=None):
        """Read back per-neuron 'Sigma' values; also caches a
        node->sigma mapping on self.sigs."""
        if nodes is None:
            nodes = self.node_wsn
        sigs = nest.GetStatus(nodes,"Sigma")
        self.sigs = dict(zip(nodes,sigs))
        return sigs
    def get_delays(self,nodes=None):
        """Read back per-neuron 'D_Int' values; cached on self.delays."""
        if nodes is None:
            nodes = self.node_wsn
        delays = nest.GetStatus(nodes,'D_Int')
        self.delays = delays
        return delays
        #syns = nest.GetConnections(
        #target=self.node_wsn,
        #synapse_model=self.NestP.Syn_Model_Name[0] )
        #self.int_delays = nest.GetStatus(syns,'delay')
        #syns = nest.GetConnections(
        #target=self.node_wsn,
        #synapse_model=self.NestP.Syn_Model_Name[1] )
        #self.enc_delays = nest.GetStatus(syns,'delay')
        #return nest.GetStatus(syns,'delay')
    def get_WSN_spikes(self,t0=None, t1=None, nodes=None):
        """Return (times, senders) of WSN spikes between t0 and t1.

        t0/t1 may be Time objects or seconds (converted to msec, NEST's
        internal unit).  Reads from memory or from the NEST spike file
        depending on SaveFile mode.
        """
        if nodes is None:
            nodes = self.node_wsn
        if self.SaveFile is None:
            wsn_events = nest.GetStatus(self.spk_wsn,'events')[0]
            times = wsn_events['times']
            senders = wsn_events['senders'].astype(int)
            if t0 is None:
                t0msec = min(times)
            else:
                if isinstance(t0,Time):
                    t0msec=t0.as_msec()
                else:
                    t0msec=t0*1000.0
            if t1 is None:
                t1msec = max(times)
            else:
                if isinstance(t1,Time):
                    t1msec=t1.as_msec()
                else:
                    t1msec=t1*1000.0
            # Keep only events inside the window and from requested nodes.
            use_idx = np.array([ t>=t0msec and t<=t1msec and s in nodes
                    for t,s in zip(times,senders) ])
            return times[use_idx],senders[use_idx]
        else:
            spkfile = self.get_spk_filename('wsn')
            senders, times = self.load_spk_fromfile(spkfile,t0,t1)
            return times,senders
            #wsn_events = np.loadtxt(spkfile)
            #senders = wsn_events[:,0].astype(int)
            #times = wsn_events[:,1]
    def get_clk_times(self,t0=None,t1=None):
        """Return clock spike times (msec) between t0 and t1
        (Time objects or seconds)."""
        if self.SaveFile is None:
            clks = nest.GetStatus(self.spk_clk, 'events')[0]['times']
            if t0 is None:
                t0msec = min(clks)
            else:
                if isinstance(t0,Time):
                    t0msec = t0.as_msec()
                else:
                    t0msec = t0*1000.0
            if t1 is None:
                t1msec = max(clks)
            else:
                if isinstance(t1,Time):
                    t1msec = t1.as_msec()
                else:
                    t1msec = t1*1000.0
            return clks[np.logical_and(clks>=t0msec,clks<=t1msec)]
        else:
            spkfile = self.get_spk_filename('clk')
            senders, clks = self.load_spk_fromfile(spkfile,t0,t1)
            return clks
    def get_spk_filename(self,ftype='wsn'):
        """Locate the NEST spike file ('wsn' or 'clk') for this SaveFile
        prefix; raises Errors.NetPathInexistError when unavailable."""
        if self.SaveFile is not None:
            import os
            from fnmatch import fnmatch
            fd,fn = os.path.split(self.SaveFile)
            flist = os.listdir(fd)
            if ftype=='wsn':
                fpat = '_'.join([fn,'spk_wsn*'])
            elif ftype=='clk':
                fpat = '_'.join([fn,'spk_clk*'])
            else:
                raise Errors.NetPathInexistError
            spk_f = [ f for f in flist if fnmatch(f, fpat) ]
            if not spk_f:
                raise Errors.NetPathInexistError
            else:
                # NOTE(review): picks the last match in listdir() order,
                # which is not guaranteed to be the newest file -- confirm.
                return os.path.join(fd,spk_f[-1])
        else:
            raise Errors.NetPathInexistError
    def load_spk_fromfile(self,filename,t0=None,t1=None):
        """Parse a tab-separated NEST spike file into
        (senders, times) arrays, filtered to [t0, t1]."""
        from Utility import Time
        f = open(filename,'r')
        import csv
        reader = csv.reader(f,delimiter='\t')
        senders = list()
        times = list()
        # Normalise bounds to milliseconds.
        if t0 is not None:
            if not isinstance(t0,Time):
                t0 = Time(seconds = t0)
            t0 = t0.as_msec()
        if t1 is not None:
            if not isinstance(t1,Time):
                t1 = Time( seconds=t1 )
            t1 = t1.as_msec()
        for r in reader:
            t = float(r[1])
            sv_this = True
            if t0 is not None:
                sv_this = t>=t0
            if t1 is not None:
                sv_this = sv_this and (t<=t1)
            if sv_this:
                senders.append(int(r[0]))
                times.append(t)
        return np.array(senders,dtype=int),np.array(times,dtype=float)
    def get_WSN_spike_phases(self,t0=None,t1=None,nodes=None,inverse=False,group=False):
        """Return (clock_times, phases).

        phases[node, cycle] is the spike time relative to the start of the
        clock cycle (or to its end when `inverse`); NaN where a node did
        not spike in that cycle.  With `group`, phases are reshaped to the
        channel grouping; the last (possibly truncated) cycle is dropped.
        """
        from bisect import bisect_right
        clk = self.get_clk_times(t0,t1)
        if nodes is None:
            nodes = self.node_wsn
            grp_shape = np.array(self.node_wsn_chan_grps.shape)
        else:
            grp_shape = np.array(np.array(nodes).shape)
            nodes = np.ravel(nodes)
        times, senders = self.get_WSN_spikes(t0,t1,nodes)
        if self.SaveFile is None:
            self.get_delays(nodes)
        #int_delays = self.int_delays
        #enc_delays = self.enc_delays
        T_Clk = self.NetP.T_Clk
        nodes_idx = dict(zip(nodes,range(len(nodes))))
        spk_phase = np.full( (len(nodes), len(clk)), np.NaN )
        for t, s in zip(times,senders):
            nid = nodes_idx[s]
            #enc_duration = int_delays[nid] + enc_delays[nid] + T_Enc
            if t>=clk[0] and t<=clk[-1]+T_Clk:
                # Index of the clock edge that started this cycle.
                cid = bisect_right(clk,t)-1
                if inverse:
                    phase = clk[cid] + T_Clk - t
                else:
                    phase = t - clk[cid]
                spk_phase[nid,cid] = phase
        if group:
            return clk[:-1], np.reshape(spk_phase[:,:-1], np.append(grp_shape,len(clk)-1))
        else:
            return clk[:-1], spk_phase[:,:-1]
if __name__=='__main__':
    # Demo driver: encode a voice recording with a WSN network and plot the
    # resulting spike phases.  Depends on local ../Tools helpers.
    from pylab import *
    from nest.raster_plot import from_device as raster
    import scipy.signal as sigpack
    sys.path.append("../Tools")
    import FileIO
    import PlotTools
    t_in, I_in = FileIO.LoadVoiceData("an4_clstk/fash/an253-fash-b.raw")
    #t_in, I_in = PF.LoadVoiceData("an4_clstk/fjam/cen1-fjam-b.raw")
    # Stretch the time axis and scale the amplitude before feeding the net.
    t_in = t_in * 10000.0
    In_Amp = 5.0
    I_in = I_in * In_Amp
    network = WSNnet(
        Sigma=['linear', 0.2, 10.0],
        Size_In=[1],
        N_Sig_Per_Ch=10,
        Vth=-1.0,
        Tau=45.0,
        D_Int=['const', 25.0],
        En_Trace = False
        )
    network.CreateNet()
    network.Sim(t_in,I_in)
    #times, senders = network.get_WSN_spikes(3000,5000)
    #events = nest.GetStatus([network.rec_wsn[3]],'events')[0]
    clk, phase = network.get_WSN_spike_phases(group=False)
    sigmas = network.get_sigmas()
    #PlotTools.pcolor(clk,sigmas,phase,y_ticks=True,y_ticklabels=['a','b','c'])
    #PlotTools.raster(times,nodes)
    show()
    #OF.PlotVoiceResult(u_events,clk,sigmas,u_nodes,t_in,I_in,P.syn.Clk_Enc['delay'],P.neu.U['Ti'])
    ion()
| |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_addrgrp
short_description: Configure IPv4 address groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and addrgrp category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_addrgrp:
description:
- Configure IPv4 address groups.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
allow_routing:
description:
- Enable/disable use of this group in the static route configuration.
type: str
choices:
- enable
- disable
color:
description:
- Color of icon on the GUI.
type: int
comment:
description:
- Comment.
type: str
member:
description:
- Address objects contained within the group.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
name:
description:
- Address group name.
required: true
type: str
tagging:
description:
- Config object tagging.
type: list
suboptions:
category:
description:
- Tag category. Source system.object-tagging.category.
type: str
name:
description:
- Tagging entry name.
required: true
type: str
tags:
description:
- Tags.
type: list
suboptions:
name:
description:
- Tag name. Source system.object-tagging.tags.name.
required: true
type: str
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
visibility:
description:
- Enable/disable address visibility in the GUI.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 address groups.
fortios_firewall_addrgrp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_addrgrp:
allow_routing: "enable"
color: "4"
comment: "Comment."
member:
-
name: "default_name_7 (source firewall.address.name firewall.addrgrp.name)"
name: "default_name_8"
tagging:
-
category: "<your_own_value> (source system.object-tagging.category)"
name: "default_name_11"
tags:
-
name: "default_name_13 (source system.object-tagging.tags.name)"
uuid: "<your_own_value>"
visibility: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open an authenticated session on the given FortiOS handle.

    `data` must provide 'host', 'username', 'password' and 'ssl_verify';
    a missing or truthy 'https' key selects HTTPS transport.
    """
    fos.debug('on')
    # Absent key defaults to HTTPS on; only an explicit falsy value turns
    # it off.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_firewall_addrgrp_data(json):
    """Project `json` onto the known addrgrp option names.

    Unknown keys and keys whose value is None are dropped.
    """
    option_list = ['allow_routing', 'color', 'comment',
                   'member', 'name', 'tagging',
                   'uuid', 'visibility']
    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to foo-bar (FortiOS API
    naming).

    Dicts are rebuilt with hyphenated keys, lists are converted element by
    element (in place), scalars pass through unchanged.
    """
    if isinstance(data, list):
        # BUGFIX: assign back by index.  Rebinding the loop variable alone
        # discarded the converted dicts, so list members kept their
        # underscore keys.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def firewall_addrgrp(data, fos):
    """Create/update or delete a firewall.addrgrp object on the device.

    Returns the raw FortiOS response from fos.set()/fos.delete(); returns
    None implicitly when no recognised state is requested.
    """
    vdom = data['vdom']
    # The top-level 'state' (Ansible >= 2.9) wins over the deprecated
    # per-suboption 'state'.
    if 'state' in data and data['state']:
        state = data['state']
    elif data['firewall_addrgrp'] and 'state' in data['firewall_addrgrp']:
        # Truthiness is checked before membership so a None suboption dict
        # cannot raise TypeError (the original order tested membership
        # first).
        state = data['firewall_addrgrp']['state']
    else:
        state = True
    firewall_addrgrp_data = data['firewall_addrgrp']
    filtered_data = underscore_to_hyphen(filter_firewall_addrgrp_data(firewall_addrgrp_data))
    if state == "present":
        return fos.set('firewall',
                       'addrgrp',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'addrgrp',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """A FortiOS response is OK when it reports success, or when a DELETE
    hit a missing object (HTTP 404), which counts as already-absent."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
    # Dispatch entry point for the firewall category; returns the tuple
    # (is_error, has_changed, raw_response).
    # NOTE(review): if data['firewall_addrgrp'] is falsy, `resp` is never
    # bound and the return statement raises NameError.  main() always
    # supplies the suboption dict, but confirm before reusing elsewhere.
    if data['firewall_addrgrp']:
        resp = firewall_addrgrp(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: build the argument spec, choose legacy
    (fortiosapi) or HTTPAPI transport, apply the configuration and report
    the result through the Ansible module API."""
    # Argument spec mirroring the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_addrgrp": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "allow_routing": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "color": {"required": False, "type": "int"},
                "comment": {"required": False, "type": "str"},
                "member": {"required": False, "type": "list",
                           "options": {
                               "name": {"required": True, "type": "str"}
                           }},
                "name": {"required": True, "type": "str"},
                "tagging": {"required": False, "type": "list",
                            "options": {
                                "category": {"required": False, "type": "str"},
                                "name": {"required": True, "type": "str"},
                                "tags": {"required": False, "type": "list",
                                         "options": {
                                             "name": {"required": True, "type": "str"}
                                         }}
                            }},
                "uuid": {"required": False, "type": "str"},
                "visibility": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: requires a persistent-connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_firewall(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: authenticate directly via fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 15:53:19 2015
@author: pedro.correia
"""
from __future__ import division # Just making sure that correct integer division is working
import numpy as np # This is numpy,python numerical library
import objects_parser as obj # Our local objects library.
def create_object_by_interdistance(obj1,obj2,sheet1,sheet2,coords1,coords2,maxdist=3,extra=''):
    """
    This functions will receive two excel class objects and given the coord keys
    (x,y,z,variable) will create a whole new object only with the indexes of points
    in obj1 that are in a distance less than maxdist from points in obj2.

    coords1/coords2 list the x,y,z key names followed by the variable key
    names; `extra` optionally names a string variable of obj1 to carry over.
    """
    # Creating shortcut names for x,y,z in object 1
    x1 = obj1.me[sheet1][coords1[0]]
    y1 = obj1.me[sheet1][coords1[1]]
    z1 = obj1.me[sheet1][coords1[2]]
    # Creating shortcut names for x,y,z in object 2
    x2 = obj2.me[sheet2][coords2[0]]
    y2 = obj2.me[sheet2][coords2[1]]
    z2 = obj2.me[sheet2][coords2[2]]
    mask = (x2!=obj2.null)
    # First pass: count the surviving points so the output arrays can be
    # allocated in one go.
    counter = 0
    for i in xrange(x1.shape[0]):
        if x1[i]!=obj1.null and y1[i]!=obj1.null and z1[i]!=obj1.null:
            dist = np.sqrt((x1[i]-x2[mask])**2+(y1[i]-y2[mask])**2+(z1[i]-z2[mask])**2)
            if dist.min()<=maxdist:
                counter = counter + 1
    nx = np.zeros(counter,dtype='float32')
    ny = np.zeros(counter,dtype='float32')
    nz = np.zeros(counter,dtype='float32')
    variables = {}
    if len(coords1)>4:
        for i in xrange(3,len(coords1)):
            variables[coords1[i]] = np.zeros(counter,dtype='float32')
    else:
        variables[coords1[3]] = np.zeros(counter,dtype='float32')
    # BUGFIX: s_extra must exist even when no extra variable is requested;
    # previously the second pass raised NameError for the default extra=''.
    s_extra = False
    if len(extra)>0:
        variables[extra] = np.zeros(counter,dtype='|S15')
        s_extra = True
    # Second pass: copy the surviving points.
    counter = 0
    for i in xrange(x1.shape[0]):
        if x1[i]!=obj1.null and y1[i]!=obj1.null and z1[i]!=obj1.null:
            dist = np.sqrt((x1[i]-x2[mask])**2+(y1[i]-y2[mask])**2+(z1[i]-z2[mask])**2)
            if dist.min()<=maxdist:
                nx[counter] = x1[i]
                ny[counter] = y1[i]
                nz[counter] = z1[i]
                for j in xrange(3,len(coords1)):
                    variables[coords1[j]][counter] = obj1.me[sheet1][coords1[j]][i]
                if s_extra: variables[extra][counter] = obj1.me[sheet1][extra][i]
                counter = counter + 1
    variables[coords1[0]] = nx
    variables[coords1[1]] = ny
    variables[coords1[2]] = nz
    result = {}
    result[sheet1] = variables
    return obj.excelObject(result,obj1.null)
def create_scatter_object_by_interdistance(obj1,obj2,sheet1,sheet2,coords1,coords2,extra='2',maxdist=3):
    """
    FROM: create_object_by_interdistance
    This functions will receive two excel class objects and given the coord keys
    (x,y,z,variable) will create a whole new object only with the indexes of points
    in obj1 that are in a distance less than maxdist from points in obj2.
    FROM: create_scatter_object_by_interdistance
    The difference from the first is that the return object has one extra variable
    which is the closest point from obj2.
    """
    # Creating shortcut names for x,y,z in object 1
    x1 = obj1.me[sheet1][coords1[0]]
    y1 = obj1.me[sheet1][coords1[1]]
    z1 = obj1.me[sheet1][coords1[2]]
    # Creating shortcut names for x,y,z in object 2
    x2 = obj2.me[sheet2][coords2[0]]
    y2 = obj2.me[sheet2][coords2[1]]
    z2 = obj2.me[sheet2][coords2[2]]
    mask = (x2!=obj2.null)
    # First pass: count surviving points so output arrays can be allocated.
    counter = 0
    for i in xrange(x1.shape[0]):
        if x1[i]!=obj1.null and y1[i]!=obj1.null and z1[i]!=obj1.null:
            dist = np.sqrt((x1[i]-x2[mask])**2+(y1[i]-y2[mask])**2+(z1[i]-z2[mask])**2)
            if dist.min()<=maxdist:
                counter = counter + 1
    nx = np.zeros(counter,dtype='float32')
    ny = np.zeros(counter,dtype='float32')
    nz = np.zeros(counter,dtype='float32')
    variables = {}
    if len(coords1)>4:
        for i in xrange(3,len(coords1)):
            variables[coords1[i]] = np.zeros(counter,dtype='float32')
    else:
        variables[coords1[3]] = np.zeros(counter,dtype='float32')
    # Extra column for the closest-obj2-point value, suffixed with `extra`.
    variables[coords2[3]+extra] = np.zeros(counter,dtype='float32')
    # Second pass: copy the surviving points.
    counter = 0
    for i in xrange(x1.shape[0]):
        if x1[i]!=obj1.null and y1[i]!=obj1.null and z1[i]!=obj1.null:
            dist = np.sqrt((x1[i]-x2[mask])**2+(y1[i]-y2[mask])**2+(z1[i]-z2[mask])**2)
            if dist.min()<=maxdist:
                nx[counter] = x1[i]
                ny[counter] = y1[i]
                nz[counter] = z1[i]
                for j in xrange(3,len(coords1)):
                    variables[coords1[j]][counter] = obj1.me[sheet1][coords1[j]][i]
                indx = dist.argmin()
                # NOTE(review): this stores the value of coords2[2] (the z
                # key) of the closest obj2 point, while the column is named
                # after coords2[3] (the variable key) -- confirm whether
                # coords2[3] was intended here.
                variables[coords2[3]+extra][counter] = obj2.me[sheet2][coords2[2]][mask][indx]
                counter = counter + 1
    variables[coords1[0]] = nx
    variables[coords1[1]] = ny
    variables[coords1[2]] = nz
    result = {}
    result[sheet1] = variables
    return obj.excelObject(result,obj1.null)
def create_object_by_type(obj1,sheet,variables,variable,value):
    """
    Build a new excel object from `obj1` keeping only the rows where
    `variable` equals `value`; the new object carries the columns listed
    in `variables`.
    """
    keep = np.where(obj1.me[sheet][variable]==value)
    sheet_data = {name: obj1.me[sheet][name][keep] for name in variables}
    return obj.excelObject({sheet: sheet_data}, obj1.null)
def calculate_variogram(obj1,sheet,variables):
    """
    This function will create a variogram table that can latter be used to
    compute a directional variogram (there's a directional_variogram function).

    Each row of the returned (n_pairs, 5) float32 array describes one pair
    of points: [distance, azimuth (deg), dip (deg), squared value
    difference, |dz|].
    """
    x = obj1.me[sheet][variables[0]]
    y = obj1.me[sheet][variables[1]]
    z = obj1.me[sheet][variables[2]]
    v = obj1.me[sheet][variables[3]]
    # Number of rows produced by the i-loop below (sized to match it).
    full_size = np.sum(xrange(x.shape[0]))-x.shape[0]+1
    variogram_list = np.zeros((full_size,5),dtype='float32')
    variogram_list[:,:] = -99
    counter = 0
    l = x.shape[0]
    # NOTE(review): the loop starts at i=1, so point 0 is never paired with
    # anything; full_size is computed to match exactly this loop -- confirm
    # the exclusion of point 0 is intentional.
    for i in xrange(1,l-1):
        # Vector differences from point i to every later point.
        distx = (x[i+1:]-x[i])
        disty = (y[i+1:]-y[i])
        distz = (z[i+1:]-z[i])
        # Azimuth in [-90, 90] degrees from the y axis; the sign is flipped
        # for the (distx<0, disty>0) and (distx>0, disty<0) quadrants.
        azimuth = np.arctan2(np.abs(distx),np.abs(disty))*180/np.pi
        Q2 = np.where((distx<0) & (disty>0))
        Q4 = np.where((distx>0) & (disty<0))
        azimuth[Q2[0]] = azimuth[Q2[0]]*-1
        azimuth[Q4[0]] = azimuth[Q4[0]]*-1
        dip = np.arctan2(distz,np.sqrt(distx**2+disty**2))*180/np.pi #np.arctan2(distz,np.sqrt(distx**2+disty**2))*180/np.pi
        dist = np.sqrt(distx**2+disty**2+distz**2)
        # Squared difference of the variable (2*gamma contribution).
        var = (v[i+1:]-v[i])**2
        variogram_list[counter:counter+(l-i-1),0] = dist[:]
        variogram_list[counter:counter+(l-i-1),1] = azimuth[:]
        variogram_list[counter:counter+(l-i-1),2] = dip[:]
        variogram_list[counter:counter+(l-i-1),3] = var[:]
        variogram_list[counter:counter+(l-i-1),4] = np.abs(distz[:])
        counter = counter+(l-i-1)
    return variogram_list
def directional_variogram(variogram_list,azimuth,dip,tolerance,bins,maximum=False,dz=False):
    """
    This function will return a the arrays necessary to plot an experimental
    variogram. You'll need first to calculate the variogram table using
    calculate_variogram function.

    Returns a (semivariance, lag-center) pair of arrays; if the
    azimuth/dip window catches no pairs a sentinel pair is returned.
    `maximum=False` means "use the largest pair distance"; `dz` optionally
    caps the vertical separation of accepted pairs.
    """
    if type(maximum)==bool: maximum = variogram_list[:,0].max()
    if dip==0:
        if type(dz)==bool:
            # Select pairs inside the azimuth/dip tolerance window.
            ind0 = np.where((variogram_list[:,1]<=azimuth+tolerance) & (variogram_list[:,1] >= azimuth - tolerance) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance))
            # Azimuths live in [-90, 90]; when the window crosses a limit,
            # wrap around and merge the extra indices.
            if azimuth+tolerance>90:
                dif = -90 + (azimuth + tolerance - 90)
                ind0b = np.where((variogram_list[:,1]<=dif) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance))
                ind0 = (np.hstack((ind0[0],ind0b[0])),)
            elif azimuth-tolerance<-90:
                dif = 90 - np.abs((azimuth - tolerance + 90))
                ind0b = np.where((variogram_list[:,1]>=dif) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance))
                ind0 = (np.hstack((ind0[0],ind0b[0])),)
        else:
            # Same selection, additionally limited by |dz| (column 4).
            ind0 = np.where((variogram_list[:,1]<=azimuth+tolerance) & (variogram_list[:,1] >= azimuth - tolerance) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance) & (variogram_list[:,4]<=dz))
            if azimuth+tolerance>90:
                dif = -90 + (azimuth + tolerance - 90)
                ind0b = np.where((variogram_list[:,1]<=dif) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance) & (variogram_list[:,4]<=dz))
                ind0 = (np.hstack((ind0[0],ind0b[0])),)
            elif azimuth-tolerance<-90:
                dif = 90 - np.abs((azimuth - tolerance + 90))
                ind0b = np.where((variogram_list[:,1]>=dif) & (variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance) & (variogram_list[:,4]<=dz))
                ind0 = (np.hstack((ind0[0],ind0b[0])),)
    else:
        # Non-horizontal direction: filter on dip only.
        ind0 = np.where((variogram_list[:,2]<=dip+tolerance) & (variogram_list[:,2] >= dip - tolerance))
    # Histogram pair counts and summed squared differences per lag bin.
    countsPerBin = np.histogram(variogram_list[ind0,0],bins=bins,range=[0,maximum])
    sumsPerBin = np.histogram(variogram_list[ind0,0],bins=bins,range=[0,maximum], weights=variogram_list[ind0,3])
    ind = np.where(countsPerBin[0]!=0)
    average = sumsPerBin[0][ind] / countsPerBin[0][ind]
    if len(average)>0:
        # average/2 is the semivariance; bin edges shifted by half a width
        # give the lag-bin centers.
        return (average/2,sumsPerBin[1][ind]+(sumsPerBin[1][1]-sumsPerBin[1][0])/2)
    else:
        # Sentinel output for an empty selection.
        return (np.array([-10,-10,-10]),np.array([30,70,maximum]))
| |
# -*- coding: utf-8 -*-
import numpy as np
from allel.model.ndarray import SortedIndex
from allel.util import asarray_ndim, ignore_invalid, check_equal_length
def moving_statistic(values, statistic, size, start=0, stop=None, step=None, **kwargs):
    """Calculate a statistic in a moving window over `values`.
    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        The statistic to compute within each window.
    size : int
        The window size (number of values).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    kwargs
        Additional keyword arguments are passed through to the `statistic`
        function.
    Returns
    -------
    out : ndarray, shape (n_windows,)
    Examples
    --------
    >>> import allel
    >>> values = [2, 5, 8, 16]
    >>> allel.moving_statistic(values, np.sum, size=2)
    array([ 7, 24])
    >>> allel.moving_statistic(values, np.sum, size=2, step=1)
    array([ 7, 13, 24])
    """
    # Apply the statistic to each (start, stop) index window in turn and
    # stack the per-window results into a single array.
    results = []
    for begin, end in index_windows(values, size, start, stop, step):
        results.append(statistic(values[begin:end], **kwargs))
    return np.array(results)
def moving_mean(values, size, start=0, stop=None, step=None):
    """Moving average of `values`; thin wrapper around
    :func:`moving_statistic` with ``np.mean`` as the statistic."""
    return moving_statistic(values, np.mean, size,
                            start=start, stop=stop, step=step)
def moving_std(values, size, start=0, stop=None, step=None):
    """Moving standard deviation of `values`; thin wrapper around
    :func:`moving_statistic` with ``np.std`` as the statistic."""
    return moving_statistic(values, np.std, size,
                            start=start, stop=stop, step=step)
def moving_midpoint(values, size, start=0, stop=None, step=None):
    """Midpoint (mean of first and last value) of each moving window;
    thin wrapper around :func:`moving_statistic`."""
    def midpoint(window):
        # Same expression as the original lambda: average of the window's
        # first and last elements.
        return (window[0] + window[-1])/2
    return moving_statistic(values, midpoint, size,
                            start=start, stop=stop, step=step)
def index_windows(values, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`moving_statistic` function.

    Yields (window_start, window_stop) index pairs over `values`; any
    trailing partial window is dropped so all windows are equal sized.
    """
    # Default the stop bound to the full length of the input.
    if stop is None:
        stop = len(values)
    # Default step yields non-overlapping windows.
    if step is None:
        step = size
    for begin in range(start, stop, step):
        end = begin + size
        if end > stop:
            # A short final window would be unequal sized — stop here.
            break
        yield (begin, end)
def position_windows(pos, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`windowed_statistic` and :func:`windowed_count` functions.

    Returns an (n_windows, 2) array of inclusive 1-based
    (window_start, window_stop) positions; the final window is truncated
    at `stop`.
    """
    # Default the window range to the span of the positions.
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    # Default step yields non-overlapping windows.
    if step is None:
        step = size
    windows = []
    for wstart in range(start, stop, step):
        wstop = wstart + size
        if wstop >= stop:
            # Final window: truncate at the stop position and finish.
            windows.append([wstart, stop])
            break
        # Window stops are inclusive, hence the -1.
        windows.append([wstart, wstop - 1])
    return np.asarray(windows)
def window_locations(pos, windows):
    """Locate indices in `pos` corresponding to the start and stop positions
    of `windows`.

    Returns an (n_windows, 2) array of [start_index, stop_index) slices
    into `pos` (stop position is inclusive via ``side='right'``).
    """
    starts = np.searchsorted(pos, windows[:, 0])
    stops = np.searchsorted(pos, windows[:, 1], side='right')
    return np.column_stack((starts, stops))
def windowed_count(pos, size=None, start=None, stop=None, step=None,
                   windows=None):
    """Count the number of items in windows over a single chromosome/contig.
    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    Returns
    -------
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop) positions,
        using 1-based coordinates.
    Notes
    -----
    The window stop positions are included within a window.
    The final window will be truncated to the specified stop position,
    and so may be smaller than the other windows.
    Examples
    --------
    Non-overlapping windows::
        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> counts, windows = allel.windowed_count(pos, size=10)
        >>> counts
        array([2, 2, 1])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])
    Half-overlapping windows::
        >>> counts, windows = allel.windowed_count(pos, size=10, step=5)
        >>> counts
        array([2, 3, 2, 0, 1])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])
    """
    # Wrap positions so they can be searched efficiently.
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    # Build windows from size/start/stop/step unless supplied explicitly.
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)
    # The count in each window is simply the width of its index span.
    locs = window_locations(pos, windows)
    counts = locs[:, 1] - locs[:, 0]
    return counts, windows
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    """Calculate a statistic from items in windows over a single
    chromosome/contig.
    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    values : array_like, int, shape (n_items,)
        The values to summarise. May also be a tuple of values arrays,
        in which case each array will be sliced and passed through to the
        statistic function as separate arguments.
    statistic : function
        The statistic to compute.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where a window is empty, i.e., contains no items.
    Returns
    -------
    out : ndarray, shape (n_windows,)
        The value of the statistic for each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop) positions,
        using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.
    Notes
    -----
    The window stop positions are included within a window.
    The final window will be truncated to the specified stop position,
    and so may be smaller than the other windows.
    Examples
    --------
    Count non-zero (i.e., True) items in non-overlapping windows::
        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> values = [True, False, True, False, False]
        >>> nnz, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.count_nonzero, size=10
        ... )
        >>> nnz
        array([1, 1, 0])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])
        >>> counts
        array([2, 2, 1])
    Compute a sum over items in half-overlapping windows::
        >>> values = [3, 4, 2, 6, 9]
        >>> x, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.sum, size=10, step=5, fill=0
        ... )
        >>> x
        array([ 7, 12, 8, 0, 9])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])
        >>> counts
        array([2, 3, 2, 0, 1])
    """
    # Wrap positions so they can be searched efficiently.
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    # A tuple of values arrays means the statistic takes several arguments;
    # each array must align with `pos`.
    multi = isinstance(values, tuple)
    if multi:
        check_equal_length(pos, *values)
    else:
        check_equal_length(pos, values)
    # Build windows from size/start/stop/step unless supplied explicitly.
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)
    locs = window_locations(pos, windows)
    out = []
    counts = []
    for begin, end in locs:
        n = end - begin
        counts.append(n)
        if n == 0:
            # Empty window — substitute the fill value.
            out.append(fill)
        elif multi:
            out.append(statistic(*(v[begin:end] for v in values)))
        else:
            out.append(statistic(values[begin:end]))
    return np.asarray(out), windows, np.asarray(counts)
def per_base(x, windows, is_accessible=None, fill=np.nan):
    """Calculate the per-base value of a windowed statistic.
    Parameters
    ----------
    x : array_like, shape (n_windows,)
        The statistic to average per-base.
    windows : array_like, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions using 1-based coordinates.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in the
        chromosome/contig.
    fill : object, optional
        Use this value where there are no accessible bases in a window.
    Returns
    -------
    y : ndarray, float, shape (n_windows,)
        The input array divided by the number of (accessible) bases in each
        window.
    n_bases : ndarray, int, shape (n_windows,)
        The number of (accessible) bases in each window
    """
    # Number of bases per window: either the raw (inclusive) window span,
    # or the count of accessible positions within the window.
    if is_accessible is None:
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        # windows are 1-based, is_accessible is 0-based, hence i-1.
        n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                            for i, j in windows])
    # Broadcast against a 2-dimensional statistic if necessary.
    if x.ndim == 2:
        n_bases = n_bases[:, None]
    elif x.ndim != 1:
        raise NotImplementedError('only arrays of 1 or 2 dimensions supported')
    # Divide, substituting `fill` where a window has no (accessible) bases;
    # invalid-value warnings from the all-elements division are suppressed.
    with ignore_invalid():
        y = np.where(n_bases > 0, x / n_bases, fill)
    # Return n_bases as a flat array regardless of broadcasting above.
    if n_bases.ndim > 1:
        n_bases = n_bases.reshape(-1)
    return y, n_bases
def equally_accessible_windows(is_accessible, size, start=0, stop=None, step=None):
    """Create windows each containing the same number of accessible bases.
    Parameters
    ----------
    is_accessible : array_like, bool, shape (n_bases,)
        Array defining accessible status of all bases on a contig/chromosome.
    size : int
        Window size (number of accessible bases).
    start : int, optional
        The genome position at which to start.
    stop : int, optional
        The genome position at which to stop.
    step : int, optional
        The number of accessible sites between start positions
        of windows. If not given, defaults to the window size, i.e.,
        non-overlapping windows. Use half the window size to get
        half-overlapping windows.
    Returns
    -------
    windows : ndarray, int, shape (n_windows, 2)
        Window start/stop positions (1-based).
    """
    # Genomic (1-based) positions of all accessible bases.
    pos_accessible = np.nonzero(is_accessible)[0] + 1
    # start/stop are genomic positions bounding the windows, not indices.
    if start:
        pos_accessible = pos_accessible[pos_accessible >= start]
    if stop:
        pos_accessible = pos_accessible[pos_accessible <= stop]
    # Each window spans from the first to the last accessible position of a
    # fixed-size block of accessible sites.
    return moving_statistic(pos_accessible, lambda w: [w[0], w[-1]],
                            size=size, step=step)
| |
import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed NumPy-format docstring as Sphinx-flavoured reST.
    Each ``_str_*`` helper returns a list of reST source lines for one
    docstring section; ``__str__`` concatenates them in display order.
    """
    def __init__(self, docstring, config={}):
        # NOTE(review): mutable default ``config={}`` is shared between
        # calls; it is only read here, but confirm base classes never
        # mutate it.
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # ``symbol`` (the plain-text underline character) is unused here;
        # a Sphinx rubric directive replaces the underline.
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        # Field-list header, e.g. ``:Parameters:``.
        return [':' + name + ':']
    def _str_indent(self, doc, indent=4):
        # Return a copy of ``doc`` with every line indented by ``indent``
        # spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out
    def _str_signature(self):
        # NOTE(review): the unconditional return makes the code below
        # unreachable, i.e. signatures are deliberately suppressed here.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
    def _str_summary(self):
        # One-line summary followed by a blank separator line.
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
    def _str_param_list(self, name):
        # Render a parameter-style section (Parameters/Returns/Raises) as a
        # field list with bold parameter names and indented descriptions.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out
    @property
    def _obj(self):
        # The documented object: subclasses set ``_cls`` (classes) or
        # ``_f`` (functions/plain objects); None if neither is set.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            # NOTE(review): ``if False`` permanently disables the
            # autosummary branch; members always render via the table
            # fallback below.
            if False and autosum:
                out += ['.. autosummary::', ' :toctree:', '']
                out += autosum
            if others:
                # Build a simple reST table sized to the longest name/type.
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out
    def _str_section(self, name):
        # Generic free-text section rendered under a rubric heading,
        # dedented to strip docstring indentation.
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Drop the first two lines (the plain-text header) of the base
            # class's output before indenting under the directive.
            out += self._str_indent(see_also[2:])
        return out
    def _str_warnings(self):
        # Wrap the Warnings section in a Sphinx warning admonition.
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
    def _str_index(self):
        # Emit an ``.. index::`` directive from the parsed index metadata;
        # 'default' holds the main entry, other keys become sub-entries.
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic string comparison of version
            # numbers ("0.10" < "0.6" lexically); adequate for the versions
            # targeted here but not a general version check.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # Wrap matplotlib-based examples in a ``plot::`` directive when plot
        # support is enabled and the author has not already added one.
        if (self.use_plots and 'import matplotlib' in examples_str
            and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in display order, then apply the requested
        # base indentation to the whole document.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx reST renderer for a function's docstring.

    Combines the FunctionDoc parser with the SphinxDocString renderer.
    """
    def __init__(self, obj, doc=None, config=None):
        # Avoid a shared mutable default argument: treat None as "no
        # configuration" and substitute a fresh dict per call.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx reST renderer for a class's docstring.

    Combines the ClassDoc parser with the SphinxDocString renderer.
    """
    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # Avoid a shared mutable default argument: treat None as "no
        # configuration" and substitute a fresh dict per call.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): ``func_doc`` is accepted but deliberately not
        # forwarded (ClassDoc receives func_doc=None) — behavior preserved
        # from the original; confirm intent before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx reST renderer for an arbitrary object's docstring.

    Used when the object is neither a class nor a function/method; the
    docstring text is supplied directly.
    """
    def __init__(self, obj, doc=None, config=None):
        # Avoid a shared mutable default argument: treat None as "no
        # configuration" and substitute a fresh dict per call.
        config = {} if config is None else config
        # Record the wrapped object so the ``_obj`` property can find it.
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx docstring renderer for `obj`.

    Parameters
    ----------
    obj : object
        The object whose docstring is to be rendered.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred from
        `obj` when not given.
    doc : str, optional
        Docstring text override; for plain objects it defaults to
        ``pydoc.getdoc(obj)``.
    config : dict, optional
        Renderer configuration (e.g. ``use_plots``).
    """
    # Avoid a shared mutable default argument: substitute a fresh dict.
    config = {} if config is None else config
    if what is None:
        # Infer the kind of object from its type.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        # Modules and plain objects fall through to the generic renderer.
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import math
import os
import unittest
from telemetry.core import util
from telemetry.util import external_modules
try:
np = external_modules.ImportRequiredModule('numpy')
cv2 = external_modules.ImportRequiredModule('cv2')
except (ImportError, NotImplementedError) as err:
pass
else:
# pylint: disable=W0212
  class ScreenFinderTest(unittest.TestCase):
    """Unit tests for telemetry.image_processing.screen_finder.ScreenFinder.

    End-to-end cases decode frames from a test video; the remaining cases
    exercise individual private helpers on a ScreenFinder built over a fake
    frame generator.
    """
    def __init__(self, *args, **kwargs):
      super(ScreenFinderTest, self).__init__(*args, **kwargs)
      # Import modules with dependencies that may not be preset in test setup so
      # that importing this unit test doesn't cause the test runner to raise an
      # exception.
      from telemetry.image_processing import screen_finder
      from telemetry.image_processing.io import fake_frame_generator
      from telemetry.image_processing.io import video_file_frame_generator
      self.FakeFrameGenerator = fake_frame_generator.FakeFrameGenerator
      self.VideoFileFrameGenerator = \
          video_file_frame_generator.VideoFileFrameGenerator
      self.ScreenFinder = screen_finder.ScreenFinder
    def _GetScreenFinder(self, video_filename):
      # Build a ScreenFinder over the named test video, or over a fake
      # frame generator when no filename is given.
      if not video_filename:
        fg = self.FakeFrameGenerator()
      else:
        vid = os.path.join(util.GetUnittestDataDir(), video_filename)
        fg = self.VideoFileFrameGenerator(vid)
      return self.ScreenFinder(fg)
    def testBasicFunctionality(self):
      # End-to-end: find the screen in all 3 frames of the test video and
      # verify stable corner locations and screen dimensions.
      def CheckCorners(corners, expected):
        for i in xrange(len(corners)):
          for j in xrange(len(corners[i])):
            self.assertAlmostEqual(corners[i][j], expected[i][j], delta=1.1)
      expected = [[314, 60], [168, 58], [162, 274], [311, 276]]
      sf = self._GetScreenFinder('screen_3_frames.mov')
      self.assertTrue(sf.HasNext())
      screen, corners = sf.GetNext()
      CheckCorners(corners, expected)
      self.assertIsNotNone(screen)
      height, width = screen.shape[:2]
      self.assertAlmostEqual(height, 226, delta=2)
      self.assertAlmostEqual(width, 156, delta=2)
      self.assertTrue(sf.HasNext())
      screen, corners = sf.GetNext()
      CheckCorners(corners, expected)
      self.assertIsNotNone(screen)
      height1, width1 = screen.shape[:2]
      self.assertEqual(width, width1)
      self.assertEqual(height, height1)
      self.assertTrue(sf.HasNext())
      screen, corners = sf.GetNext()
      CheckCorners(corners, expected)
      self.assertIsNotNone(screen)
      height2, width2 = screen.shape[:2]
      self.assertEqual(width, width2)
      self.assertEqual(height, height2)
      self.assertFalse(sf.HasNext())
      error = ''
      try:
        sf.GetNext()
      except RuntimeError as e:
        error = str(e)
      self.assertEqual(error, 'No more frames available.')
    def testHasMovedFast(self):
      # Small corner displacements are "not moved"; larger ones are.
      sf = self._GetScreenFinder(None)
      prev_corners = np.asfarray(([1000, 1000], [0, 1000], [0, 0], [1000, 0]))
      self.assertFalse(sf._HasMovedFast(prev_corners, prev_corners))
      not_moved = copy.deepcopy(prev_corners)
      not_moved[0][1] += 1
      not_moved[1][1] += 1
      not_moved[3][0] += 0.9
      self.assertFalse(sf._HasMovedFast(not_moved, prev_corners))
      moved = copy.deepcopy(prev_corners)
      moved[0][1] += math.sqrt(0.5)
      moved[0][0] += math.sqrt(0.5)
      moved[1][1] += 2.1
      self.assertTrue(sf._HasMovedFast(moved, prev_corners))
    def testPointConnectsToCorners(self):
      sf = self._GetScreenFinder(None)
      line1 = np.asfarray(((0, 0, 1, 0)))
      line2 = np.asfarray(((0, 0, 0, 1)))
      point = np.asfarray((0, 0))
      point_info = (point, line1, line2)
      corners = np.asfarray(((1, 0), (0, 1)))
      self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 1))
      corners = np.append(corners, (100, 1))
      corners = np.append(corners, (1, 100))
      corners = corners.reshape(-1, 2)
      self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 2))
      self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 0.5))
      corners = np.append(corners, (100, 0))
      corners = np.append(corners, (0, 100))
      corners = corners.reshape(-1, 2)
      self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 0))
    def testFindIntersections(self):
      # Intersections of a box plus diagonal lines; with previous-corner
      # data and no reset, only intersections near old corners survive.
      def _BuildResult(point, line1, line2):
        return [point, np.asfarray(line1).tolist(), np.asfarray(line2).tolist()]
      def _IntersectionResultsToList(results):
        result_list = []
        for result in results:
          point, line1, line2 = result
          p = np.round(point).tolist()
          l1 = np.round(line1).tolist()
          l2 = np.round(line2).tolist()
          result_list.append([p, l1, l2])
        return result_list
      sf = self._GetScreenFinder(None)
      expected = []
      lines = []
      # Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
      lines.append(np.asfarray(((0, 1001, 0, -1))))
      lines.append(np.asfarray(((-1, 0, 1001, 0))))
      lines.append(np.asfarray(((1000, 1001, 1000, -1))))
      lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
      expected.append(_BuildResult([0, 0], lines[0], lines[1]))
      expected.append(_BuildResult([0, 1000], lines[0], lines[3]))
      expected.append(_BuildResult([1000, 0], lines[1], lines[2]))
      expected.append(_BuildResult([1000, 1000], lines[2], lines[3]))
      # crosses 2 lines at 45 degrees.
      lines.append(np.asfarray(((0, 500, 500, 0))))
      expected.append(_BuildResult([0, 500], lines[0], lines[4]))
      expected.append(_BuildResult([500, 0], lines[1], lines[4]))
      # crosses 1 line at > 45 degrees, 1 line at < 45 degrees.
      lines.append(np.asfarray(((0, 400, 600, 0))))
      expected.append(_BuildResult([0, 400], lines[0], lines[5]))
      # Test without previous corner data, all intersections should be found.
      results = sf._FindIntersections(lines)
      result_list = _IntersectionResultsToList(results)
      for e in expected:
        self.assertIn(e, result_list)
      self.assertEqual(len(expected), len(result_list))
      # Now introduce previous corners, but also reset conditions. No
      # intersections should be lost.
      corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
      sf._prev_corners = np.asfarray(corners, np.float32)
      sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES + 1
      results = sf._FindIntersections(lines)
      result_list = _IntersectionResultsToList(results)
      for e in expected:
        self.assertIn(e, result_list)
      self.assertEqual(len(expected), len(result_list))
      # Remove reset conditions, so intersections not near corners will be lost.
      sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES
      # First 4 intersections are the ones at the old corner locations.
      expected = expected[0:4]
      results = sf._FindIntersections(lines)
      result_list = _IntersectionResultsToList(results)
      for e in expected:
        self.assertIn(e, result_list)
      self.assertEqual(len(expected), len(result_list))
    def testPointIsCloseToPreviousCorners(self):
      # Points just inside the max interframe motion radius pass; just
      # outside fail.
      sf = self._GetScreenFinder(None)
      corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
      sf._prev_corners = np.asfarray(corners, np.float32)
      dist = math.sqrt(sf.MAX_INTERFRAME_MOTION)
      sidedist1 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) - (1e-13)
      sidedist2 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) + (1e-13)
      point1 = (corners[3][0] + dist, corners[3][1])
      self.assertTrue(sf._PointIsCloseToPreviousCorners(point1))
      point2 = (corners[3][0] + sidedist1, corners[3][1] + sidedist1)
      self.assertTrue(sf._PointIsCloseToPreviousCorners(point2))
      point3 = (corners[1][0] + sidedist2, corners[1][1] + sidedist2)
      self.assertFalse(sf._PointIsCloseToPreviousCorners(point3))
    def testLooksLikeCorner(self):
      # TODO: Probably easier to just do end to end tests.
      pass
    def testCornerData(self):
      # CornerData stores its five fields and sorts by corner_index.
      cd = self.ScreenFinder.CornerData('a', 'b', 'c', 'd', 'e')
      self.assertEqual(cd.corner_index, 'a')
      self.assertEqual(cd.corner_location, 'b')
      self.assertEqual(cd.brightness_score, 'c')
      self.assertEqual(cd.line1, 'd')
      self.assertEqual(cd.line2, 'e')
      cd_list = []
      cd_list.append(self.ScreenFinder.CornerData(0, None, None, None, None))
      cd_list.append(self.ScreenFinder.CornerData(3, None, None, None, None))
      cd_list.append(self.ScreenFinder.CornerData(1, None, None, None, None))
      cd_list.append(self.ScreenFinder.CornerData(2, None, None, None, None))
      cd_list.sort()
      for i in range(len(cd_list)):
        self.assertEqual(i, cd_list[i].corner_index)
    def testFindCorners(self):
      # TODO: Probably easier to just do end to end tests.
      pass
    def testDeDupCorners(self):
      sf = self._GetScreenFinder(None)
      data = []
      lines = []
      lines.append(np.asfarray((0, 1001, 0, -1)))
      lines.append(np.asfarray((-1, 0, 1001, 0)))
      lines.append(np.asfarray((1000, 1001, 1000, -1)))
      lines.append(np.asfarray((-1, 1000, 1001, 1000)))
      lines.append(np.asfarray((0, 10, 10, 0)))
      lines.append(np.asfarray((-1, 1001, 1001, 1001)))
      corners = np.asfarray(((1000, 1000), (0, 1000), (0, 0),
                             (1000, 0), (0, 10), (10, 0), (1000, 1001)))
      data.append(self.ScreenFinder.CornerData(2, corners[2], 100,
                                               lines[0], lines[1]))
      data.append(self.ScreenFinder.CornerData(1, corners[1], 100,
                                               lines[0], lines[3]))
      data.append(self.ScreenFinder.CornerData(3, corners[3], 100,
                                               lines[1], lines[2]))
      data.append(self.ScreenFinder.CornerData(0, corners[0], 100,
                                               lines[2], lines[3]))
      data.append(self.ScreenFinder.CornerData(2, corners[4], 120,
                                               lines[0], lines[4]))
      data.append(self.ScreenFinder.CornerData(2, corners[5], 110,
                                               lines[1], lines[4]))
      data.append(self.ScreenFinder.CornerData(0, corners[6], 110,
                                               lines[2], lines[5]))
      dedup = copy.copy(data)
      # Tests 2 non-duplicate corners, 1 corner with connected and unconnected
      # corners, and 1 corner with two connected corners.
      sf._DeDupCorners(dedup, corners)
      self.assertEqual(len(dedup), 4)
      self.assertIn(data[0], dedup)
      self.assertIn(data[1], dedup)
      self.assertIn(data[2], dedup)
      self.assertIn(data[6], dedup)
      # Same test, but this time the corner with connected and unconnected
      # corners now only contains unconnected corners.
      del data[0]
      corners = np.delete(corners, 2, axis=0)
      dedup2 = copy.copy(data)
      sf._DeDupCorners(dedup2, corners)
      self.assertEqual(len(dedup2), 4)
      self.assertIn(data[3], dedup2)
      self.assertIn(data[0], dedup2)
      self.assertIn(data[1], dedup2)
      self.assertIn(data[5], dedup2)
    def testFindExactCorners(self):
      # Corner refinement snaps approximate corners onto edge pixels of a
      # tiny synthetic edge image.
      sf = self._GetScreenFinder(None)
      img = np.zeros((3, 3), np.uint8)
      img[1][0] = 255
      img[0][1] = 255
      img[1][2] = 255
      img[2][1] = 255
      sf._frame_edges = img
      corners = np.asfarray([(1, 1), (1, 1), (1, 1), (1, 1)])
      expected = np.asfarray([(2, 0), (0, 0), (0, 2), (2, 2)])
      ret = sf._FindExactCorners(corners)
      np.testing.assert_equal(ret, expected)
      img2 = np.zeros((3, 3), np.uint8)
      img2[1][0] = 255
      img2[1][1] = 255
      img2[2][2] = 255
      img2[2][1] = 255
      sf._frame_edges = img2
      expected2 = [(2, 1), (0, 1), (0, 2), (2, 2)]
      ret2 = sf._FindExactCorners(corners)
      np.testing.assert_equal(ret2, expected2)
    def testSmoothCorners(self):
      # Smoothing averages new corners with prior state; repeated calls
      # converge toward the latest input.
      sf = self._GetScreenFinder(None)
      corners = [[10, 10], [10, 10], [10, 10], [10, 10]]
      ret = sf._SmoothCorners(corners).tolist()
      self.assertListEqual(ret, corners)
      corners = [[0, 0], [0, 0], [0, 0], [0, 0]]
      expected = [[5, 5], [5, 5], [5, 5], [5, 5]]
      ret = sf._SmoothCorners(corners).tolist()
      self.assertListEqual(ret, expected)
      expected = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
      ret = sf._SmoothCorners(corners).tolist()
      self.assertListEqual(ret, expected)
    def testGetTransform(self):
      sf = self._GetScreenFinder(None)
      corners = np.array([[100, 1000], [0, 1000], [0, 0], [100, 0]], np.float32)
      transform, w, h = sf._GetTransform(corners, 1)
      transform = np.round(transform, 2)
      expected = [[1., 0., 1.], [-0., -1., 1001.], [0., -0., 1.]]
      self.assertListEqual(transform.tolist(), expected)
      self.assertEqual(w, 102)
      self.assertEqual(h, 1002)
      corners = np.array([(200, 2000), (0, 2000), (0, 0), (200, 0)], np.float32)
      transform, w, h = sf._GetTransform(corners, 5)
      transform = np.round(transform, 2)
      expected = [[0.5, 0.0, 5.0], [-0.0, -0.5, 1005.0], [-0.0, 0.0, 1.0]]
      self.assertListEqual(transform.tolist(), expected)
      self.assertEqual(w, 110)
      self.assertEqual(h, 1010)
    def testNewScreenLocation(self):
      # Missing corners (NaN) are recovered from intersections and
      # previous-corner state; lost-corner bookkeeping is updated per call.
      sf = self._GetScreenFinder(None)
      corners_2 = np.asfarray([[np.nan, np.nan], [0, 1000], [np.nan, np.nan],
                               [1000, 0]])
      corners_3 = np.asfarray([[1000, 1000], [0, 1000], [np.nan, np.nan],
                               [1000, 0]])
      corners_4 = np.asfarray([[1000, 1000], [0, 1000], [0, 0], [1000, 0]])
      lines = []
      # Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
      lines.append(np.asfarray(((0, 1001, 0, -1))))
      lines.append(np.asfarray(((-1, 0, 1001, 0))))
      lines.append(np.asfarray(((1000, 1001, 1000, -1))))
      lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
      # Additional intersections near a corner.
      lines.append(np.asfarray(((0, 3, 7, 0))))
      lines.append(np.asfarray(((0, 4, 6, 0))))
      intersections = sf._FindIntersections(lines)
      failed = False
      try:
        sf._NewScreenLocation(corners_3, 1, intersections)
      except self.ScreenFinder.ScreenNotFoundError:
        failed = True
      self.assertTrue(failed)
      sf._lost_corner_frames = 10
      sf._lost_corners = [True, True, True, True]
      ret = sf._NewScreenLocation(corners_4, 0, intersections)
      np.testing.assert_equal(ret, corners_4)
      self.assertListEqual(sf._lost_corners, [False, False, False, False])
      self.assertEqual(sf._lost_corner_frames, 0)
      sf._prev_corners = corners_4
      ret = sf._NewScreenLocation(corners_3, 1, intersections)
      ret = np.round(ret)
      np.testing.assert_equal(ret, corners_4)
      self.assertListEqual(sf._lost_corners, [False, False, True, False])
      self.assertEqual(sf._lost_corner_frames, 1)
      sf._prev_corners = np.asfarray([(1000, 1000), (0, 1000),
                                      (0, 3), (1000, 0)])
      ret = sf._NewScreenLocation(corners_3, 1, intersections)
      ret = np.round(ret)
      np.testing.assert_equal(ret, corners_4)
      self.assertListEqual(sf._lost_corners, [False, False, True, False])
      self.assertEqual(sf._lost_corner_frames, 2)
      ret = sf._NewScreenLocation(corners_2, 2, intersections)
      ret = np.round(ret)
      expected = [[1000, 1000], [0, 1000], [0, 3], [1000, 0]]
      np.testing.assert_equal(ret, expected)
      self.assertListEqual(sf._lost_corners, [True, False, True, False])
      self.assertEqual(sf._lost_corner_frames, 3)
| |
import ujson as json
from v20.base_entity import BaseEntity
from v20.base_entity import EntityDict
from v20.request import Request
from v20 import spec_properties
class Position(BaseEntity):
    """
    The specification of a Position within an Account.
    """
    # Format string used when generating a summary for this object.
    _summary_format = "{instrument}, {pl} PL {unrealizedPL} UPL"
    # Format string used when generating a name for this object.
    _name_format = "Position"
    # Property metadata for this object.
    _properties = spec_properties.position_Position
    def __init__(self, **kwargs):
        """
        Create a new Position instance
        """
        super(Position, self).__init__()
        # The Position's Instrument.
        self.instrument = kwargs.get("instrument")
        # Profit/loss realized by the Position over the lifetime of the
        # Account.
        self.pl = kwargs.get("pl")
        # The unrealized profit/loss of all open Trades that contribute to
        # this Position.
        self.unrealizedPL = kwargs.get("unrealizedPL")
        # Margin currently used by the Position.
        self.marginUsed = kwargs.get("marginUsed")
        # Profit/loss realized since the Account's resettablePL was last
        # reset by the client.
        self.resettablePL = kwargs.get("resettablePL")
        # Total financing paid/collected for this instrument over the
        # lifetime of the Account.
        self.financing = kwargs.get("financing")
        # Total commission paid for this instrument over the lifetime of
        # the Account.
        self.commission = kwargs.get("commission")
        # Total fees charged for the execution of guaranteed Stop Loss
        # Orders for this instrument.
        self.guaranteedExecutionFees = kwargs.get("guaranteedExecutionFees")
        # The details of the long side of the Position.
        self.long = kwargs.get("long")
        # The details of the short side of the Position.
        self.short = kwargs.get("short")
    @staticmethod
    def from_dict(data, ctx):
        """
        Instantiate a new Position from a dict (generally from loading a JSON
        response). The data used to instantiate the Position is a shallow copy
        of the dict passed in, with any complex child types instantiated
        appropriately.
        """
        data = data.copy()
        # All decimal-valued fields share the same conversion, so loop over
        # them rather than repeating the same if-block per field.
        for key in ("pl", "unrealizedPL", "marginUsed", "resettablePL",
                    "financing", "commission", "guaranteedExecutionFees"):
            if data.get(key) is not None:
                data[key] = ctx.convert_decimal_number(data.get(key))
        # The long/short sides are nested PositionSide objects.
        for side in ("long", "short"):
            if data.get(side) is not None:
                data[side] = ctx.position.PositionSide.from_dict(
                    data[side], ctx
                )
        return Position(**data)
class PositionSide(BaseEntity):
    """
    The representation of a Position for a single direction (long or short).
    """

    # Format string used when generating a summary for this object
    _summary_format = "{units} @ {averagePrice}, {pl} PL {unrealizedPL} UPL"

    # Format string used when generating a name for this object
    _name_format = ""

    # Property metadata for this object
    _properties = spec_properties.position_PositionSide

    # Simple attributes copied verbatim from keyword arguments.
    _FIELDS = (
        "units",                    # signed unit count (negative == short)
        "averagePrice",             # volume-weighted average open price
        "tradeIDs",                 # open Trade IDs contributing to the side
        "pl",                       # realized P/L over the Account lifetime
        "unrealizedPL",             # unrealized P/L of contributing Trades
        "resettablePL",             # realized P/L since last resettablePL reset
        "financing",                # total financing paid/collected
        "guaranteedExecutionFees",  # total guaranteed SL Order execution fees
    )

    # Attributes that carry decimal numbers and need conversion in from_dict.
    _DECIMAL_FIELDS = (
        "units", "averagePrice", "pl", "unrealizedPL",
        "resettablePL", "financing", "guaranteedExecutionFees",
    )

    def __init__(self, **kwargs):
        """
        Create a new PositionSide instance.

        Every known attribute is taken from *kwargs*; missing keys default
        to None.
        """
        super(PositionSide, self).__init__()
        for name in self._FIELDS:
            setattr(self, name, kwargs.get(name))

    @staticmethod
    def from_dict(data, ctx):
        """
        Instantiate a new PositionSide from a dict (generally from loading a
        JSON response). The data used to instantiate the PositionSide is a
        shallow copy of the dict passed in, with any complex child types
        instantiated appropriately.
        """
        data = data.copy()

        # Convert raw decimal-number strings using the context's converter.
        for name in PositionSide._DECIMAL_FIELDS:
            if data.get(name) is not None:
                data[name] = ctx.convert_decimal_number(
                    data.get(name)
                )

        return PositionSide(**data)
class CalculatedPositionState(BaseEntity):
    """
    The dynamic (calculated) state of a Position
    """

    # Format string used when generating a summary for this object
    _summary_format = ""

    # Format string used when generating a name for this object
    _name_format = ""

    # Property metadata for this object
    _properties = spec_properties.position_CalculatedPositionState

    # Simple attributes copied verbatim from keyword arguments.
    _FIELDS = (
        "instrument",        # the Position's Instrument
        "netUnrealizedPL",   # the Position's net unrealized profit/loss
        "longUnrealizedPL",  # unrealized P/L of the long open Trades
        "shortUnrealizedPL", # unrealized P/L of the short open Trades
        "marginUsed",        # margin currently used by the Position
    )

    # Attributes that carry decimal numbers and need conversion in from_dict.
    _DECIMAL_FIELDS = (
        "netUnrealizedPL", "longUnrealizedPL",
        "shortUnrealizedPL", "marginUsed",
    )

    def __init__(self, **kwargs):
        """
        Create a new CalculatedPositionState instance.

        Every known attribute is taken from *kwargs*; missing keys default
        to None.
        """
        super(CalculatedPositionState, self).__init__()
        for name in self._FIELDS:
            setattr(self, name, kwargs.get(name))

    @staticmethod
    def from_dict(data, ctx):
        """
        Instantiate a new CalculatedPositionState from a dict (generally from
        loading a JSON response). The data used to instantiate the
        CalculatedPositionState is a shallow copy of the dict passed in, with
        any complex child types instantiated appropriately.
        """
        data = data.copy()

        # Convert raw decimal-number strings using the context's converter.
        for name in CalculatedPositionState._DECIMAL_FIELDS:
            if data.get(name) is not None:
                data[name] = ctx.convert_decimal_number(
                    data.get(name)
                )

        return CalculatedPositionState(**data)
class EntitySpec(object):
    """
    The position.EntitySpec wraps the position module's type definitions
    and API methods so they can be easily accessed through an instance of a v20
    Context.
    """

    # Entity classes re-exported so callers can reach them as
    # ctx.position.Position, ctx.position.PositionSide, etc.
    Position = Position
    PositionSide = PositionSide
    CalculatedPositionState = CalculatedPositionState

    def __init__(self, ctx):
        # ctx: the v20 Context used to submit requests and convert values.
        self.ctx = ctx

    def list(
        self,
        accountID,
        **kwargs
    ):
        """
        List all Positions for an Account. The Positions returned are for every
        instrument that has had a position during the lifetime of the
        Account.

        Args:
            accountID:
                Account Identifier

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'GET',
            '/v3/accounts/{accountID}/positions'
        )

        request.set_path_param(
            'accountID',
            accountID
        )

        response = self.ctx.request(request)

        # Responses without a parsable JSON body are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('positions') is not None:
                parsed_body['positions'] = [
                    self.ctx.position.Position.from_dict(d, self.ctx)
                    for d in jbody.get('positions')
                ]

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

        elif str(response.status) == "401":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "404":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "405":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response

    def list_open(
        self,
        accountID,
        **kwargs
    ):
        """
        List all open Positions for an Account. An open Position is a Position
        in an Account that currently has a Trade opened for it.

        Args:
            accountID:
                Account Identifier

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'GET',
            '/v3/accounts/{accountID}/openPositions'
        )

        request.set_path_param(
            'accountID',
            accountID
        )

        response = self.ctx.request(request)

        # Responses without a parsable JSON body are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('positions') is not None:
                parsed_body['positions'] = [
                    self.ctx.position.Position.from_dict(d, self.ctx)
                    for d in jbody.get('positions')
                ]

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

        elif str(response.status) == "401":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "404":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "405":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response

    def get(
        self,
        accountID,
        instrument,
        **kwargs
    ):
        """
        Get the details of a single Instrument's Position in an Account. The
        Position may be open or not.

        Args:
            accountID:
                Account Identifier
            instrument:
                Name of the Instrument

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'GET',
            '/v3/accounts/{accountID}/positions/{instrument}'
        )

        request.set_path_param(
            'accountID',
            accountID
        )

        request.set_path_param(
            'instrument',
            instrument
        )

        response = self.ctx.request(request)

        # Responses without a parsable JSON body are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('position') is not None:
                parsed_body['position'] = \
                    self.ctx.position.Position.from_dict(
                        jbody['position'],
                        self.ctx
                    )

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

        elif str(response.status) == "401":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "404":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "405":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response

    def close(
        self,
        accountID,
        instrument,
        **kwargs
    ):
        """
        Closeout the open Position for a specific instrument in an Account.

        Args:
            accountID:
                Account Identifier
            instrument:
                Name of the Instrument
            longUnits:
                Indication of how much of the long Position to closeout. Either
                the string "ALL", the string "NONE", or a DecimalNumber
                representing how many units of the long position to close using
                a PositionCloseout MarketOrder. The units specified must always
                be positive.
            longClientExtensions:
                The client extensions to add to the MarketOrder used to close
                the long position.
            shortUnits:
                Indication of how much of the short Position to closeout.
                Either the string "ALL", the string "NONE", or a DecimalNumber
                representing how many units of the short position to close
                using a PositionCloseout MarketOrder. The units specified must
                always be positive.
            shortClientExtensions:
                The client extensions to add to the MarketOrder used to close
                the short position.

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'PUT',
            '/v3/accounts/{accountID}/positions/{instrument}/close'
        )

        request.set_path_param(
            'accountID',
            accountID
        )

        request.set_path_param(
            'instrument',
            instrument
        )

        # Only explicitly-provided options are serialized into the body.
        body = EntityDict()

        if 'longUnits' in kwargs:
            body.set('longUnits', kwargs['longUnits'])

        if 'longClientExtensions' in kwargs:
            body.set('longClientExtensions', kwargs['longClientExtensions'])

        if 'shortUnits' in kwargs:
            body.set('shortUnits', kwargs['shortUnits'])

        if 'shortClientExtensions' in kwargs:
            body.set('shortClientExtensions', kwargs['shortClientExtensions'])

        request.set_body_dict(body.dict)

        response = self.ctx.request(request)

        # Responses without a parsable JSON body are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('longOrderCreateTransaction') is not None:
                parsed_body['longOrderCreateTransaction'] = \
                    self.ctx.transaction.MarketOrderTransaction.from_dict(
                        jbody['longOrderCreateTransaction'],
                        self.ctx
                    )

            if jbody.get('longOrderFillTransaction') is not None:
                parsed_body['longOrderFillTransaction'] = \
                    self.ctx.transaction.OrderFillTransaction.from_dict(
                        jbody['longOrderFillTransaction'],
                        self.ctx
                    )

            if jbody.get('longOrderCancelTransaction') is not None:
                parsed_body['longOrderCancelTransaction'] = \
                    self.ctx.transaction.OrderCancelTransaction.from_dict(
                        jbody['longOrderCancelTransaction'],
                        self.ctx
                    )

            if jbody.get('shortOrderCreateTransaction') is not None:
                parsed_body['shortOrderCreateTransaction'] = \
                    self.ctx.transaction.MarketOrderTransaction.from_dict(
                        jbody['shortOrderCreateTransaction'],
                        self.ctx
                    )

            if jbody.get('shortOrderFillTransaction') is not None:
                parsed_body['shortOrderFillTransaction'] = \
                    self.ctx.transaction.OrderFillTransaction.from_dict(
                        jbody['shortOrderFillTransaction'],
                        self.ctx
                    )

            if jbody.get('shortOrderCancelTransaction') is not None:
                parsed_body['shortOrderCancelTransaction'] = \
                    self.ctx.transaction.OrderCancelTransaction.from_dict(
                        jbody['shortOrderCancelTransaction'],
                        self.ctx
                    )

            if jbody.get('relatedTransactionIDs') is not None:
                parsed_body['relatedTransactionIDs'] = \
                    jbody.get('relatedTransactionIDs')

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

        elif str(response.status) == "400":
            if jbody.get('longOrderRejectTransaction') is not None:
                parsed_body['longOrderRejectTransaction'] = \
                    self.ctx.transaction.MarketOrderRejectTransaction.from_dict(
                        jbody['longOrderRejectTransaction'],
                        self.ctx
                    )

            if jbody.get('shortOrderRejectTransaction') is not None:
                parsed_body['shortOrderRejectTransaction'] = \
                    self.ctx.transaction.MarketOrderRejectTransaction.from_dict(
                        jbody['shortOrderRejectTransaction'],
                        self.ctx
                    )

            if jbody.get('relatedTransactionIDs') is not None:
                parsed_body['relatedTransactionIDs'] = \
                    jbody.get('relatedTransactionIDs')

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "401":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "404":
            if jbody.get('longOrderRejectTransaction') is not None:
                parsed_body['longOrderRejectTransaction'] = \
                    self.ctx.transaction.MarketOrderRejectTransaction.from_dict(
                        jbody['longOrderRejectTransaction'],
                        self.ctx
                    )

            if jbody.get('shortOrderRejectTransaction') is not None:
                parsed_body['shortOrderRejectTransaction'] = \
                    self.ctx.transaction.MarketOrderRejectTransaction.from_dict(
                        jbody['shortOrderRejectTransaction'],
                        self.ctx
                    )

            if jbody.get('relatedTransactionIDs') is not None:
                parsed_body['relatedTransactionIDs'] = \
                    jbody.get('relatedTransactionIDs')

            if jbody.get('lastTransactionID') is not None:
                parsed_body['lastTransactionID'] = \
                    jbody.get('lastTransactionID')

            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        elif str(response.status) == "405":
            if jbody.get('errorCode') is not None:
                parsed_body['errorCode'] = \
                    jbody.get('errorCode')

            if jbody.get('errorMessage') is not None:
                parsed_body['errorMessage'] = \
                    jbody.get('errorMessage')

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response
| |
"""The tests for the DD-WRT device tracker platform."""
import os
import unittest
from unittest import mock
import logging
import re
import requests
import requests_mock
import pytest
from homeassistant import config
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import (
CONF_PLATFORM, CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.util import slugify
from tests.common import (
get_test_home_assistant, assert_setup_component, load_fixture,
mock_component)
from ...test_util.aiohttp import mock_aiohttp_client
# Host used by every mocked DD-WRT request in this suite.
TEST_HOST = '127.0.0.1'

_LOGGER = logging.getLogger(__name__)
@pytest.mark.skip
class TestDdwrt(unittest.TestCase):
    """Tests for the Ddwrt device tracker platform."""

    # Home Assistant test instance, created per test in setup_method.
    hass = None

    def run(self, result=None):
        """Mock out http calls to macvendor API for whole test suite."""
        with mock_aiohttp_client() as aioclient_mock:
            macvendor_re = re.compile('http://api.macvendors.com/.*')
            aioclient_mock.get(macvendor_re, text='')
            super().run(result)

    def setup_method(self, _):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        mock_component(self.hass, 'zone')

    def teardown_method(self, _):
        """Stop everything that was started."""
        self.hass.stop()
        try:
            # The platform writes known_devices.yaml as a side effect;
            # remove it so tests stay isolated.
            os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
        except FileNotFoundError:
            pass

    @mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
    def test_login_failed(self, mock_error):
        """Create a Ddwrt scanner with wrong credentials."""
        with requests_mock.Mocker() as mock_request:
            # 401 from the router should surface an authentication error.
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                status_code=401)

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

            self.assertTrue(
                'Failed to authenticate' in
                str(mock_error.call_args_list[-1]))

    @mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
    def test_invalid_response(self, mock_error):
        """Test error handling when response has an error status."""
        with requests_mock.Mocker() as mock_request:
            # Non-auth error status should be reported as invalid response.
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                status_code=444)

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

            self.assertTrue(
                'Invalid response from DD-WRT' in
                str(mock_error.call_args_list[-1]))

    @mock.patch('homeassistant.components.device_tracker._LOGGER.error')
    @mock.patch('homeassistant.components.device_tracker.'
                'ddwrt.DdWrtDeviceScanner.get_ddwrt_data', return_value=None)
    def test_no_response(self, data_mock, error_mock):
        """Create a Ddwrt scanner with no response in init, should fail."""
        with assert_setup_component(1, DOMAIN):
            assert setup_component(
                self.hass, DOMAIN, {DOMAIN: {
                    CONF_PLATFORM: 'ddwrt',
                    CONF_HOST: TEST_HOST,
                    CONF_USERNAME: 'fake_user',
                    CONF_PASSWORD: '0'
                }})

        self.assertTrue(
            'Error setting up platform' in
            str(error_mock.call_args_list[-1]))

    @mock.patch('homeassistant.components.device_tracker.ddwrt.requests.get',
                side_effect=requests.exceptions.Timeout)
    @mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
    def test_get_timeout(self, mock_error, mock_request):
        """Test get Ddwrt data with request time out."""
        with assert_setup_component(1, DOMAIN):
            assert setup_component(
                self.hass, DOMAIN, {DOMAIN: {
                    CONF_PLATFORM: 'ddwrt',
                    CONF_HOST: TEST_HOST,
                    CONF_USERNAME: 'fake_user',
                    CONF_PASSWORD: '0'
                }})

        self.assertTrue(
            'Connection to the router timed out' in
            str(mock_error.call_args_list[-1]))

    def test_scan_devices(self):
        """Test creating device info (MAC, name) from response.

        The created known_devices.yaml device info is compared
        to the DD-WRT Lan Status request response fixture.
        This effectively checks the data parsing functions.
        """
        status_lan = load_fixture('Ddwrt_Status_Lan.txt')

        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Wireless.txt'))
            mock_request.register_uri(
                'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
                text=status_lan)

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

            self.hass.block_till_done()

            # Every tracked device should have come from the LAN fixture.
            path = self.hass.config.path(device_tracker.YAML_DEVICES)
            devices = config.load_yaml_config_file(path)
            for device in devices:
                self.assertIn(devices[device]['mac'], status_lan)
                self.assertIn(slugify(devices[device]['name']), status_lan)

    def test_device_name_no_data(self):
        """Test creating device info (MAC only) when no response."""
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Wireless.txt'))
            mock_request.register_uri(
                'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST, text=None)

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

            self.hass.block_till_done()

            path = self.hass.config.path(device_tracker.YAML_DEVICES)
            devices = config.load_yaml_config_file(path)
            status_lan = load_fixture('Ddwrt_Status_Lan.txt')
            for device in devices:
                _LOGGER.error(devices[device])
                self.assertIn(devices[device]['mac'], status_lan)

    def test_device_name_no_dhcp(self):
        """Test creating device info (MAC) when missing dhcp response."""
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Wireless.txt'))
            # Strip the dhcp_leases key to simulate a partial LAN response.
            mock_request.register_uri(
                'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Lan.txt').
                replace('dhcp_leases', 'missing'))

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

            self.hass.block_till_done()

            path = self.hass.config.path(device_tracker.YAML_DEVICES)
            devices = config.load_yaml_config_file(path)
            status_lan = load_fixture('Ddwrt_Status_Lan.txt')
            for device in devices:
                _LOGGER.error(devices[device])
                self.assertIn(devices[device]['mac'], status_lan)

    def test_update_no_data(self):
        """Test error handling of no response when active devices checked."""
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                # First request has to work to set up connection
                [{'text': load_fixture('Ddwrt_Status_Wireless.txt')},
                 # Second request to get active devices fails
                 {'text': None}])
            mock_request.register_uri(
                'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Lan.txt'))

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})

    def test_update_wrong_data(self):
        """Test error handling of bad response when active devices checked."""
        with requests_mock.Mocker() as mock_request:
            # Strip active_wireless to simulate a malformed wireless response.
            mock_request.register_uri(
                'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Wireless.txt').
                replace('active_wireless', 'missing'))
            mock_request.register_uri(
                'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
                text=load_fixture('Ddwrt_Status_Lan.txt'))

            with assert_setup_component(1, DOMAIN):
                assert setup_component(
                    self.hass, DOMAIN, {DOMAIN: {
                        CONF_PLATFORM: 'ddwrt',
                        CONF_HOST: TEST_HOST,
                        CONF_USERNAME: 'fake_user',
                        CONF_PASSWORD: '0'
                    }})
| |
import contextlib
import threading
from django.conf import settings
from django.db import models, transaction
from django.utils import encoding, translation
import caching.base
import elasticsearch
import multidb.pinning
import queryset_transform
from . import search
# Needed to set up url prefix signals.
from . import signals # noqa
# Thread-local flags used by the helpers below; skip_cache defaults to False
# so cached queries are used unless explicitly disabled for a thread.
_locals = threading.local()
_locals.skip_cache = False
@contextlib.contextmanager
def use_master():
    """Within this context, all queries go to the master."""
    pinning_state = multidb.pinning._locals
    # Remember whether this thread was already pinned so nesting works.
    previously_pinned = getattr(pinning_state, 'pinned', False)
    multidb.pinning.pin_this_thread()
    try:
        yield
    finally:
        pinning_state.pinned = previously_pinned
@contextlib.contextmanager
def skip_cache():
    """Within this context, no queries come from cache."""
    # Remember the previous flag so nested skip_cache() blocks restore it.
    previous = getattr(_locals, 'skip_cache', False)
    _locals.skip_cache = True
    try:
        yield
    finally:
        _locals.skip_cache = previous
# This is sadly a copy and paste of annotate to get around this
# ticket http://code.djangoproject.com/ticket/14707
def annotate(self, *args, **kwargs):
    """Copy of Django's QuerySet.annotate, monkeypatched in below.

    Kept byte-for-byte in sync with the upstream implementation apart from
    the workaround for the ticket referenced above.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The %s named annotation conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg

    obj = self._clone()

    obj._setup_aggregate_query(kwargs.keys())

    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(
            aggregate_expr, self.model, alias, is_summary=False)

    return obj

# Replace the stock implementation globally with the patched copy.
models.query.QuerySet.annotate = annotate
class TransformQuerySet(queryset_transform.TransformQuerySet):

    def pop_transforms(self):
        """Return (transform functions, clone stripped of transforms)."""
        clone = self._clone()
        pending = clone._transform_fns
        clone._transform_fns = []
        return pending, clone

    def no_transforms(self):
        """Return a clone of this queryset with all transforms removed."""
        _, clone = self.pop_transforms()
        return clone

    def only_translations(self):
        """Remove all transforms except translations."""
        from translations import transformer
        # Add an extra select so these are cached separately.
        stripped = self.no_transforms().extra(select={'_only_trans': 1})
        return stripped.transform(transformer.get_trans)

    def transform(self, fn):
        from . import decorators
        # Transforms must not be served from cache; wrap them accordingly.
        wrapped = decorators.skip_cache(fn)
        return super(TransformQuerySet, self).transform(wrapped)
class RawQuerySet(models.query.RawQuerySet):
    """A RawQuerySet with __len__."""

    def __init__(self, *args, **kw):
        super(RawQuerySet, self).__init__(*args, **kw)
        # Materialized results; populated lazily on first iteration so the
        # raw query runs at most once.
        self._result_cache = None

    def __iter__(self):
        if self._result_cache is None:
            self._result_cache = list(super(RawQuerySet, self).__iter__())
        return iter(self._result_cache)

    def __len__(self):
        # Populate the cache via __iter__ and report its length directly,
        # instead of copying the cached results into a throwaway list
        # (the previous len(list(self.__iter__())) copied on every call).
        iter(self)
        return len(self._result_cache)
# Combines the __len__/materialization behavior above with cache-machine's
# caching raw queryset; MRO puts our RawQuerySet first.
class CachingRawQuerySet(RawQuerySet, caching.base.CachingRawQuerySet):
    """A RawQuerySet with __len__ and caching."""
# Make TransformQuerySet one of CachingQuerySet's parents so that we can do
# transforms on objects and then get them cached.
# NOTE: this mutates cache-machine's class globally at import time.
CachingQuerySet = caching.base.CachingQuerySet
CachingQuerySet.__bases__ = (TransformQuerySet,) + CachingQuerySet.__bases__
class UncachedManagerBase(models.Manager):
    """Manager returning TransformQuerySets without query caching."""

    def get_query_set(self):
        qs = self._with_translations(TransformQuerySet(self.model))
        return qs

    def _with_translations(self, qs):
        """Attach the translation transform (and locale marker) if needed."""
        from translations import transformer
        # Since we're attaching translations to the object, we need to stick
        # the locale in the query so objects aren't shared across locales.
        if hasattr(self.model._meta, 'translated_fields'):
            lang = translation.get_language()
            qs = qs.transform(transformer.get_trans)
            # Tautological WHERE clause whose only purpose is to embed the
            # locale in the SQL so cache keys differ per locale.
            qs = qs.extra(where=['"%s"="%s"' % (lang, lang)])
        return qs

    def transform(self, fn):
        # Convenience: apply a transform to the full queryset.
        return self.all().transform(fn)

    def raw(self, raw_query, params=None, *args, **kwargs):
        # Use our RawQuerySet so raw results support len().
        return RawQuerySet(raw_query, self.model, params=params,
                           using=self._db, *args, **kwargs)

    def safer_get_or_create(self, defaults=None, **kw):
        """
        This is subjective, but I don't trust get_or_create until #13906
        gets fixed. It's probably fine, but this makes me happy for the moment
        and solved a get_or_create we've had in the past.

        Returns (object, created) like the stock get_or_create.
        """
        with transaction.commit_on_success():
            try:
                return self.get(**kw), False
            except self.model.DoesNotExist:
                if defaults is not None:
                    kw.update(defaults)
                return self.create(**kw), True
class ManagerBase(caching.base.CachingManager, UncachedManagerBase):
    """
    Base for all managers in AMO.

    Returns TransformQuerySets from the queryset_transform project.

    If a model has translated fields, they'll be attached through a transform
    function.
    """

    def get_queryset(self):
        qs = super(ManagerBase, self).get_queryset()
        # Honor the skip_cache() context manager: bypass the query cache
        # while the thread-local flag is set.
        if getattr(_locals, 'skip_cache', False):
            qs = qs.no_cache()
        return self._with_translations(qs)

    def raw(self, raw_query, params=None, *args, **kwargs):
        # Caching counterpart of UncachedManagerBase.raw().
        return CachingRawQuerySet(raw_query, self.model, params=params,
                                  using=self._db, *args, **kwargs)
class _NoChangeInstance(object):
    """A proxy for object instances to make safe operations within an
    OnChangeMixin.on_change() callback.

    save()/update() through the proxy suppress change signals, preventing
    callbacks from re-triggering themselves.
    """

    def __init__(self, instance):
        self.__instance = instance

    def __repr__(self):
        return u'<%s for %r>' % (self.__class__.__name__, self.__instance)

    def __getattr__(self, attr):
        # Everything not found on the proxy is read from the real instance.
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, val):
        if attr.endswith('__instance'):
            # _NoChangeInstance__instance
            # (the name-mangled form of self.__instance) is stored on the
            # proxy itself; all other writes pass through to the instance.
            self.__dict__[attr] = val
        else:
            setattr(self.__instance, attr, val)

    def save(self, *args, **kw):
        # _signal=False keeps on_change() callbacks from firing again.
        kw['_signal'] = False
        return self.__instance.save(*args, **kw)

    def update(self, *args, **kw):
        # _signal=False keeps on_change() callbacks from firing again.
        kw['_signal'] = False
        return self.__instance.update(*args, **kw)
# Registry mapping a model class to its list of on_change() callbacks.
_on_change_callbacks = {}


# @TODO(Kumar) liberate: move OnChangeMixin Model mixin to nuggets
class OnChangeMixin(object):
    """Mixin for a Model that allows you to observe attribute changes.

    Register change observers with::

        class YourModel(amo.models.OnChangeMixin,
                        amo.models.ModelBase):
            # ...
            pass

        YourModel.on_change(callback)

    """

    def __init__(self, *args, **kw):
        super(OnChangeMixin, self).__init__(*args, **kw)
        # Snapshot of attributes right after __init__ so save() can diff
        # against them later.
        self._initial_attr = dict(self.__dict__)

    @classmethod
    def on_change(cls, callback):
        """Register a function to call on save or update to respond to changes.

        For example::

            def watch_status(old_attr={}, new_attr={},
                             instance=None, sender=None, **kw):
                if old_attr.get('status') != new_attr.get('status'):
                    # ...
                    new_instance.save(_signal=False)

            TheModel.on_change(watch_status)

        ``old_attr`` will be a dict of the old instance attributes.
        ``new_attr`` will be a dict of the new instance attributes, including
        any that had not been changed by the operation that triggered the
        callback (such as an update only of one field).

        .. note::

            Any call to instance.save() or instance.update() within a callback
            will not trigger any change handlers.

        .. note::

            Duplicates based on function.__name__ are ignored for a given
            class.

        """
        existing = _on_change_callbacks.get(cls, [])
        # Ignore duplicate registrations (matched by function name).
        if callback.__name__ in [e.__name__ for e in existing]:
            return callback
        _on_change_callbacks.setdefault(cls, []).append(callback)
        return callback

    def _send_changes(self, old_attr, new_attr_kw):
        # Build the "new" attribute dict from the old snapshot plus the
        # changed keys, then notify every registered callback.
        new_attr = old_attr.copy()
        new_attr.update(new_attr_kw)
        for cb in _on_change_callbacks[self.__class__]:
            cb(old_attr=old_attr, new_attr=new_attr,
               instance=_NoChangeInstance(self), sender=self.__class__)

    def save(self, *args, **kw):
        """
        Save changes to the model instance.

        If _signal=False is in `kw` the on_change() callbacks won't be called.
        """
        signal = kw.pop('_signal', True)
        result = super(OnChangeMixin, self).save(*args, **kw)
        if signal and self.__class__ in _on_change_callbacks:
            self._send_changes(self._initial_attr, dict(self.__dict__))
        return result

    def update(self, **kw):
        """
        Shortcut for doing an UPDATE on this object.

        If _signal=False is in ``kw`` the post_save signal won't be sent.
        """
        signal = kw.pop('_signal', True)
        old_attr = dict(self.__dict__)
        result = super(OnChangeMixin, self).update(_signal=signal, **kw)
        if signal and self.__class__ in _on_change_callbacks:
            self._send_changes(old_attr, kw)
        return result
class SearchMixin(object):
    """Mixin adding Elasticsearch index/unindex/search helpers to a model."""

    ES_ALIAS_KEY = 'default'

    @classmethod
    def _get_index(cls):
        # Resolve the concrete index name from settings via the alias key.
        return settings.ES_INDEXES.get(cls.ES_ALIAS_KEY)

    @classmethod
    def index(cls, document, id=None, refresh=False, index=None):
        """Wrapper around Elasticsearch.index."""
        target = index or cls._get_index()
        search.get_es().index(
            body=document, index=target,
            doc_type=cls.get_mapping_type(), id=id, refresh=refresh)

    @classmethod
    def unindex(cls, id, index=None):
        """Delete the document for ``id``; a missing document is ignored."""
        doc_id = str(id)
        try:
            search.get_es().delete(
                index or cls._get_index(), cls._meta.db_table, doc_id)
        except elasticsearch.TransportError:
            # Item wasn't found, whatevs.
            pass

    @classmethod
    def search(cls, index=None):
        """Return an ES search object bound to this model class."""
        return search.ES(cls, index or cls._get_index())

    @classmethod
    def get_mapping_type(cls):
        """The ES doc type; the model's db table name is re-used."""
        return cls._meta.db_table
class ModelBase(SearchMixin, caching.base.CachingMixin, models.Model):
    """
    Base class for AMO models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    * Fetches all translations in one subsequent query during initialization.
    """
    # Timestamps maintained automatically by Django on insert/update.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    objects = ManagerBase()

    class Meta:
        abstract = True
        get_latest_by = 'created'

    def get_absolute_url(self, *args, **kwargs):
        # Delegate to get_url_path(), which concrete models are expected
        # to define.
        return self.get_url_path(*args, **kwargs)

    @classmethod
    def _cache_key(cls, pk, db):
        """
        Custom django-cache-machine cache key implementation that avoids having
        the real db in the key, since we are only using master-slaves we don't
        need it and it avoids invalidation bugs with FETCH_BY_ID.
        """
        # Note: ``db`` is deliberately ignored; 'default' is always used.
        key_parts = ('o', cls._meta, pk, 'default')
        return ':'.join(map(encoding.smart_unicode, key_parts))

    def reload(self):
        """Reloads the instance from the database."""
        from_db = self.__class__.get_unfiltered_manager().get(pk=self.pk)
        for field in self.__class__._meta.fields:
            try:
                setattr(self, field.name, getattr(from_db, field.name))
            except models.ObjectDoesNotExist:
                # reload() can be called before cleaning up an object of stale
                # related fields, when we do soft-deletion for instance. Avoid
                # failing because of that.
                pass
        return self

    @classmethod
    def get_unfiltered_manager(cls):
        """Return the unfiltered manager from the given class."""
        return getattr(cls, 'unfiltered', cls.objects)  # Fallback on objects.

    def update(self, **kw):
        """
        Shortcut for doing an UPDATE on this object.

        If _signal=False is in ``kw`` the post_save signal won't be sent.
        """
        signal = kw.pop('_signal', True)
        cls = self.__class__
        for k, v in kw.items():
            setattr(self, k, v)
        if signal:
            # Detect any attribute changes during pre_save and add those to the
            # update kwargs.
            attrs = dict(self.__dict__)
            models.signals.pre_save.send(sender=cls, instance=self)
            for k, v in self.__dict__.items():
                if attrs[k] != v:
                    kw[k] = v
                    setattr(self, k, v)
        # We want this to not fail mysteriously for filtered out objects (eg
        # deleted or unlisted).
        objects = cls.get_unfiltered_manager()
        objects.filter(pk=self.pk).update(**kw)
        if signal:
            models.signals.post_save.send(sender=cls, instance=self,
                                          created=False)
def manual_order(qs, pks, pk_name='id'):
    """
    Given a query set and a list of primary keys, return a set of objects from
    the query set in that exact order.

    :param qs: queryset to filter and order.
    :param pks: primary-key values; result rows follow this exact order.
    :param pk_name: name of the primary-key column, used both in the raw
        MySQL FIELD() ordering expression and in the filter lookup.
    """
    if not pks:
        return qs.none()
    # Bug fix: the filter previously hard-coded ``id__in`` and ignored the
    # ``pk_name`` parameter (which the FIELD() expression already honored),
    # so callers with a custom pk column filtered on the wrong field.
    return qs.filter(**{'%s__in' % pk_name: pks}).extra(
        select={'_manual': 'FIELD(%s, %s)' % (pk_name,
                                              ','.join(map(str, pks)))},
        order_by=['_manual'])
class BlobField(models.Field):
    """MySQL blob column.

    This is for using AES_ENCYPT() to store values.

    It could maybe turn into a fancy transparent encypt/decrypt field
    like http://djangosnippets.org/snippets/2489/
    """
    description = "blob"

    def db_type(self, **kw):
        # Always map to the MySQL BLOB column type.
        return 'blob'
class SlugField(models.SlugField):
    """
    Django 1.6's SlugField rejects non-ASCII slugs. This field just
    keeps the old behaviour of not checking contents.
    """
    # Drop Django's slug validators entirely so any content is accepted.
    default_validators = []
class FakeEmail(ModelBase):
    """Stores a raw email message body in the ``fake_email`` table."""
    # NOTE(review): presumably used to capture outgoing mail instead of
    # sending it -- confirm against callers.
    message = models.TextField()

    class Meta:
        db_table = 'fake_email'
| |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import iso8601
import netaddr
import six
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
class KeyTypeError(TypeError):
    """TypeError raised when a mapping key is not of the expected type."""

    def __init__(self, expected, value):
        detail = _('Key %(key)s must be of type %(expected)s not %(actual)s'
                   ) % {'key': repr(value),
                        'expected': expected.__name__,
                        'actual': value.__class__.__name__,
                        }
        super(KeyTypeError, self).__init__(detail)
class ElementTypeError(TypeError):
    """TypeError raised when a container element is not of the expected type."""

    def __init__(self, expected, key, value):
        detail = _('Element %(key)s:%(val)s must be of type %(expected)s'
                   ' not %(actual)s'
                   ) % {'key': key,
                        'val': repr(value),
                        'expected': expected,
                        'actual': value.__class__.__name__,
                        }
        super(ElementTypeError, self).__init__(detail)
class AbstractFieldType(six.with_metaclass(abc.ABCMeta, object)):
    """Interface a field type must implement: coercion plus (de)serialization."""

    @abc.abstractmethod
    def coerce(self, obj, attr, value):
        """This is called to coerce (if possible) a value on assignment.

        This method should convert the value given into the designated type,
        or throw an exception if this is not possible.

        :param:obj: The NovaObject on which an attribute is being set
        :param:attr: The name of the attribute being set
        :param:value: The value being set
        :returns: A properly-typed value
        """
        pass

    @abc.abstractmethod
    def from_primitive(self, obj, attr, value):
        """This is called to deserialize a value.

        This method should deserialize a value from the form given by
        to_primitive() to the designated type.

        :param:obj: The NovaObject on which the value is to be set
        :param:attr: The name of the attribute which will hold the value
        :param:value: The serialized form of the value
        :returns: The natural form of the value
        """
        pass

    @abc.abstractmethod
    def to_primitive(self, obj, attr, value):
        """This is called to serialize a value.

        This method should serialize a value to the form expected by
        from_primitive().

        :param:obj: The NovaObject on which the value is set
        :param:attr: The name of the attribute holding the value
        :param:value: The natural form of the value
        :returns: The serialized form of the value
        """
        pass

    @abc.abstractmethod
    def describe(self):
        """Returns a string describing the type of the field."""
        pass
class FieldType(AbstractFieldType):
    """Default concrete field type: identity coercion and serialization."""

    def coerce(self, obj, attr, value):
        # Accept any value unchanged.
        return value

    def from_primitive(self, obj, attr, value):
        # Primitive and natural forms are identical by default.
        return value

    def to_primitive(self, obj, attr, value):
        # Primitive and natural forms are identical by default.
        return value

    def describe(self):
        # E.g. 'Integer' for the Integer subclass.
        return self.__class__.__name__
class UnspecifiedDefault(object):
    """Sentinel marking that a Field was declared without a default value."""
    pass
class Field(object):
    """Wraps a FieldType, adding nullability and default-value handling."""

    def __init__(self, field_type, nullable=False, default=UnspecifiedDefault):
        self._type = field_type
        self._nullable = nullable
        self._default = default

    @property
    def nullable(self):
        return self._nullable

    @property
    def default(self):
        return self._default

    def _null(self, obj, attr):
        """Return the value to store when ``attr`` is assigned None."""
        if self.nullable:
            return None
        # Idiom fix: compare against the sentinel class with identity
        # ('is not') rather than equality ('!=').
        elif self._default is not UnspecifiedDefault:
            # NOTE(danms): We coerce the default value each time the field
            # is set to None as our contract states that we'll let the type
            # examine the object and attribute name at that time.
            return self._type.coerce(obj, attr, self._default)
        else:
            raise ValueError(_("Field `%s' cannot be None") % attr)

    def coerce(self, obj, attr, value):
        """Coerce a value to a suitable type.

        This is called any time you set a value on an object, like:

          foo.myint = 1

        and is responsible for making sure that the value (1 here) is of
        the proper type, or can be sanely converted.

        This also handles the potentially nullable or defaultable
        nature of the field and calls the coerce() method on a
        FieldType to actually do the coercion.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being set
        :param:value: The value being set
        :returns: The properly-typed value
        """
        if value is None:
            return self._null(obj, attr)
        else:
            return self._type.coerce(obj, attr, value)

    def from_primitive(self, obj, attr, value):
        """Deserialize a value from primitive form.

        This is responsible for deserializing a value from primitive
        into regular form. It calls the from_primitive() method on a
        FieldType to do the actual deserialization.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being deserialized
        :param:value: The value to be deserialized
        :returns: The deserialized value
        """
        if value is None:
            return None
        else:
            return self._type.from_primitive(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        """Serialize a value to primitive form.

        This is responsible for serializing a value to primitive
        form. It calls to_primitive() on a FieldType to do the actual
        serialization.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being serialized
        :param:value: The value to be serialized
        :returns: The serialized value
        """
        if value is None:
            return None
        else:
            return self._type.to_primitive(obj, attr, value)

    def describe(self):
        """Return a short string describing the type of this field."""
        name = self._type.describe()
        # Idiom fix: conditional expression instead of the dated
        # 'cond and x or y' pattern.
        prefix = 'Nullable' if self.nullable else ''
        return prefix + name
class String(FieldType):
    """Field type coercing common primitives to unicode strings."""

    def coerce(self, obj, attr, value):
        # FIXME(danms): We should really try to avoid the need to do this
        if isinstance(value, (basestring, int, long, float,
                              datetime.datetime)):
            return unicode(value)
        else:
            # Bug fix: the class name was previously passed as a second
            # argument to ValueError instead of being %-formatted into
            # the message, so the error text never contained it.
            raise ValueError(_('A string is required here, not %s') %
                             value.__class__.__name__)
class UUID(FieldType):
    def coerce(self, obj, attr, value):
        # FIXME(danms): We should actually verify the UUIDness here
        return str(value)
class Integer(FieldType):
    def coerce(self, obj, attr, value):
        # Raises ValueError/TypeError if the value is not int-convertible.
        return int(value)
class Boolean(FieldType):
    def coerce(self, obj, attr, value):
        # Standard Python truthiness; never raises.
        return bool(value)
class DateTime(FieldType):
    def coerce(self, obj, attr, value):
        # Accept ISO8601 strings or datetime objects; reject anything else.
        if isinstance(value, basestring):
            value = timeutils.parse_isotime(value)
        elif not isinstance(value, datetime.datetime):
            raise ValueError(_('A datetime.datetime is required here'))
        if value.utcoffset() is None:
            # Naive datetimes are tagged as UTC.
            value = value.replace(tzinfo=iso8601.iso8601.Utc())
        return value

    def from_primitive(self, obj, attr, value):
        # Primitive form is an ISO8601 string.
        return self.coerce(obj, attr, timeutils.parse_isotime(value))

    def to_primitive(self, obj, attr, value):
        return timeutils.isotime(value)
class IPV4Address(FieldType):
    def coerce(self, obj, attr, value):
        try:
            return netaddr.IPAddress(value, version=4)
        except netaddr.AddrFormatError as e:
            # Normalize netaddr's error into a plain ValueError.
            raise ValueError(str(e))

    def from_primitive(self, obj, attr, value):
        # Primitive form is the string representation; re-parse it.
        return self.coerce(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        return str(value)
class IPV6Address(FieldType):
    def coerce(self, obj, attr, value):
        try:
            return netaddr.IPAddress(value, version=6)
        except netaddr.AddrFormatError as e:
            # Normalize netaddr's error into a plain ValueError.
            raise ValueError(str(e))

    def from_primitive(self, obj, attr, value):
        # Primitive form is the string representation; re-parse it.
        return self.coerce(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        return str(value)
class CompoundFieldType(FieldType):
    """Base for container field types; wraps the element type in a Field."""
    def __init__(self, element_type, **field_args):
        # field_args (e.g. nullable) apply to the elements, not the container.
        self._element_type = Field(element_type, **field_args)
class List(CompoundFieldType):
    def coerce(self, obj, attr, value):
        if not isinstance(value, list):
            raise ValueError(_('A list is required here'))
        # Iterate over a copy so that writing coerced elements back into
        # the original list cannot disturb the iteration.
        for index, element in enumerate(list(value)):
            value[index] = self._element_type.coerce(
                obj, '%s[%i]' % (attr, index), element)
        return value

    def to_primitive(self, obj, attr, value):
        return [self._element_type.to_primitive(obj, attr, x) for x in value]

    def from_primitive(self, obj, attr, value):
        return [self._element_type.from_primitive(obj, attr, x) for x in value]
class Dict(CompoundFieldType):
    def coerce(self, obj, attr, value):
        if not isinstance(value, dict):
            raise ValueError(_('A dict is required here'))
        # Coerce values in place; keys are restricted to strings.
        for key, element in value.items():
            if not isinstance(key, basestring):
                raise KeyTypeError(basestring, key)
            value[key] = self._element_type.coerce(
                obj, '%s["%s"]' % (attr, key), element)
        return value

    def to_primitive(self, obj, attr, value):
        primitive = {}
        for key, element in value.items():
            primitive[key] = self._element_type.to_primitive(
                obj, '%s["%s"]' % (attr, key), element)
        return primitive

    def from_primitive(self, obj, attr, value):
        concrete = {}
        for key, element in value.items():
            concrete[key] = self._element_type.from_primitive(
                obj, '%s["%s"]' % (attr, key), element)
        return concrete
class Object(FieldType):
    def __init__(self, obj_name, **kwargs):
        # Name of the object class this field accepts (matched against
        # value.obj_name() in coerce()).
        self._obj_name = obj_name
        super(Object, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        try:
            obj_name = value.obj_name()
        except AttributeError:
            # Value is not an object at all; force the mismatch below.
            obj_name = ""
        if obj_name != self._obj_name:
            raise ValueError(_('An object of type %s is required here') %
                             self._obj_name)
        return value

    def to_primitive(self, obj, attr, value):
        return value.obj_to_primitive()

    def from_primitive(self, obj, attr, value):
        # FIXME(danms): Avoid circular import from base.py
        from nova.objects import base as obj_base
        return obj_base.NovaObject.obj_from_primitive(value, obj._context)

    def describe(self):
        return "Object<%s>" % self._obj_name
class NetworkModel(FieldType):
    def coerce(self, obj, attr, value):
        if isinstance(value, network_model.NetworkInfo):
            return value
        elif isinstance(value, basestring):
            # Hmm, do we need this?
            return network_model.NetworkInfo.hydrate(value)
        else:
            raise ValueError(_('A NetworkModel is required here'))

    def to_primitive(self, obj, attr, value):
        # Primitive form is the NetworkInfo's JSON representation.
        return value.json()

    def from_primitive(self, obj, attr, value):
        return network_model.NetworkInfo.hydrate(value)
class CIDR(FieldType):
    """Field type validating 'address/prefixlen' CIDR strings.

    The original string is stored; the parsed pieces are only used for
    validation.
    """

    def coerce(self, obj, attr, value):
        try:
            network, length = value.split('/')
        except (ValueError, AttributeError):
            raise ValueError(_('CIDR "%s" is not in proper form') % value)
        try:
            network = netaddr.IPAddress(network)
        except netaddr.AddrFormatError:
            # Bug fix: the message was missing the closing quote around %s.
            raise ValueError(_('Network "%s" is not valid') % network)
        try:
            length = int(length)
        except ValueError:
            raise ValueError(_('Netmask length "%s" is not valid') % length)
        # Robustness: 'assert length >= 0' is stripped under 'python -O',
        # so validate explicitly instead of relying on AssertionError.
        if length < 0:
            raise ValueError(_('Netmask length "%s" is not valid') % length)
        if ((network.version == 4 and length > 32) or
                (network.version == 6 and length > 128)):
            raise ValueError(_('Netmask length "%(length)s" is not valid '
                               'for IPv%(version)i address') %
                             {'length': length, 'version': network.version})
        return value
class AutoTypedField(Field):
    """Field whose FieldType is fixed by the subclass's AUTO_TYPE attribute."""
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)


# Concrete convenience fields: each binds AutoTypedField to one FieldType.

class StringField(AutoTypedField):
    AUTO_TYPE = String()


class UUIDField(AutoTypedField):
    AUTO_TYPE = UUID()


class IntegerField(AutoTypedField):
    AUTO_TYPE = Integer()


class BooleanField(AutoTypedField):
    AUTO_TYPE = Boolean()


class DateTimeField(AutoTypedField):
    AUTO_TYPE = DateTime()


class IPV4AddressField(AutoTypedField):
    AUTO_TYPE = IPV4Address()


class IPV6AddressField(AutoTypedField):
    AUTO_TYPE = IPV6Address()


class DictOfStringsField(AutoTypedField):
    AUTO_TYPE = Dict(String())


class DictOfNullableStringsField(AutoTypedField):
    # nullable applies to the dict's values, not the field itself.
    AUTO_TYPE = Dict(String(), nullable=True)


class ListOfStringsField(AutoTypedField):
    AUTO_TYPE = List(String())


class ObjectField(AutoTypedField):
    def __init__(self, objtype, **kwargs):
        # Instance-level AUTO_TYPE: the target object type is per-field.
        self.AUTO_TYPE = Object(objtype)
        super(ObjectField, self).__init__(**kwargs)


class ListOfObjectsField(AutoTypedField):
    def __init__(self, objtype, **kwargs):
        # Instance-level AUTO_TYPE: a list of the given object type.
        self.AUTO_TYPE = List(Object(objtype))
        super(ListOfObjectsField, self).__init__(**kwargs)
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import base64
import json
import unittest
import mock
from google.appengine.ext import ndb
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class UtilsTest(testing_common.TestCase):
def setUp(self):
super(UtilsTest, self).setUp()
testing_common.SetIsInternalUser('internal@chromium.org', True)
testing_common.SetIsInternalUser('foo@chromium.org', False)
testing_common.SetIsAdministrator('admin@chromium.org', True)
def _AssertMatches(self, test_path, pattern):
"""Asserts that a test path matches a pattern with MatchesPattern."""
test_key = utils.TestKey(test_path)
self.assertTrue(utils.TestMatchesPattern(test_key, pattern))
def _AssertDoesntMatch(self, test_path, pattern):
"""Asserts that a test path doesn't match a pattern with MatchesPattern."""
test_key = utils.TestKey(test_path)
self.assertFalse(utils.TestMatchesPattern(test_key, pattern))
def testMatchesPattern_AllWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total', '*/*/*/*')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total', '*/*/*')
def testMatchesPattern_SomeWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/*/dromaeo.top25/*')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/*/dromaeo.another_page_set/*')
def testMatchesPattern_SomePartialWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-*/dromaeo.*/Total')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeoXtop25/Total',
'ChromiumPerf/cros-*/dromaeo.*/Total')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'OtherMaster/cros-*/dromaeo.*/Total')
def testMatchesPattern_MorePartialWildcards(self):
# Note that the wildcard matches zero or more characters.
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'Chromium*/cros-one*/*.*/To*al')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'Chromium*/linux-*/*.*/To*al')
def testMatchesPattern_RequiresFullMatchAtEnd(self):
# If there is no wildcard at the beginning or end of the
# test path part, then a part will only match if it matches
# right up to the beginning or end.
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-one/dromaeo.top25/*Tot')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-one/dromaeo.top25/otal*')
def testMostSpecificMatchingPattern_SpecificVsGeneral(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/*/Total', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_PartialVsGeneral(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/*/To*al', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_2ndLevel(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/S/*', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_TopLevelSpecificOverLowerSpecific(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/S/*', 1), ('*/*/*/Total', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_TopLevelPartialOverLowerSpecific(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/S/*', 1), ('*/*/*/To*al', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_Duplicate(self):
test_key = utils.TestKey('Does/Not/Match/Something')
result = utils.MostSpecificMatchingPattern(
test_key,
[('Does/Not/Match/*', 1), ('Does/Not/Match/*', 2)]
)
self.assertEqual(1, result)
def testParseTelemetryMetricParts_TooShort(self):
with self.assertRaises(utils.ParseTelemetryMetricFailed):
utils.ParseTelemetryMetricParts('M/B/S')
def testParseTelemetryMetricParts_TooLong(self):
with self.assertRaises(utils.ParseTelemetryMetricFailed):
utils.ParseTelemetryMetricParts('M/B/S/1/2/3/4')
def testParseTelemetryMetricParts_1Part(self):
self.assertEqual(
('', 'Measurement', ''),
utils.ParseTelemetryMetricParts('M/B/Suite/Measurement'))
def testParseTelemetryMetricParts_2Part(self):
self.assertEqual(
('', 'Measurement', 'Story'),
utils.ParseTelemetryMetricParts('M/B/Suite/Measurement/Story'))
def testParseTelemetryMetricParts_3Part(self):
self.assertEqual(
('TIRLabel', 'Measurement', 'Story'),
utils.ParseTelemetryMetricParts('M/B/Suite/Measurement/TIRLabel/Story'))
def _PutEntitiesAllExternal(self):
"""Puts entities (none internal-only) and returns the keys."""
master = graph_data.Master(id='M').put()
graph_data.Bot(parent=master, id='b').put()
keys = [
graph_data.TestMetadata(id='M/b/a', internal_only=False),
graph_data.TestMetadata(id='M/b/b', internal_only=False),
graph_data.TestMetadata(id='M/b/c', internal_only=False),
graph_data.TestMetadata(id='M/b/d', internal_only=False),
]
for t in keys:
t.UpdateSheriff()
keys = [k.put() for k in keys]
return keys
def _PutEntitiesHalfInternal(self):
"""Puts entities (half internal-only) and returns the keys."""
master = graph_data.Master(id='M').put()
graph_data.Bot(parent=master, id='b').put()
keys = [
graph_data.TestMetadata(id='M/b/ax', internal_only=True),
graph_data.TestMetadata(id='M/b/a', internal_only=False),
graph_data.TestMetadata(id='M/b/b', internal_only=False),
graph_data.TestMetadata(id='M/b/bx', internal_only=True),
graph_data.TestMetadata(id='M/b/c', internal_only=False),
graph_data.TestMetadata(id='M/b/cx', internal_only=True),
graph_data.TestMetadata(id='M/b/d', internal_only=False),
graph_data.TestMetadata(id='M/b/dx', internal_only=True),
]
for t in keys:
t.UpdateSheriff()
keys = [k.put() for k in keys]
return keys
def testGetMulti_ExternalUser_ReturnsSomeEntities(self):
keys = self._PutEntitiesHalfInternal()
self.SetCurrentUser('foo@chromium.org')
self.assertEqual(len(keys) / 2, len(utils.GetMulti(keys)))
def testGetMulti_InternalUser_ReturnsAllEntities(self):
keys = self._PutEntitiesHalfInternal()
self.SetCurrentUser('internal@chromium.org')
self.assertEqual(len(keys), len(utils.GetMulti(keys)))
def testGetMulti_AllExternalEntities_ReturnsAllEntities(self):
keys = self._PutEntitiesAllExternal()
self.SetCurrentUser('internal@chromium.org')
self.assertEqual(len(keys), len(utils.GetMulti(keys)))
def testTestPath_Test(self):
key = ndb.Key('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestPath_TestMetadata(self):
key = ndb.Key('TestMetadata', 'm/b/suite/metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestPath_Container(self):
key = ndb.Key('TestContainer', 'm/b/suite/metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestMetadataKey_None(self):
key = utils.TestMetadataKey(None)
self.assertIsNone(key)
def testTestMetadataKey_Test(self):
key = utils.TestMetadataKey(
ndb.Key('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'))
self.assertEqual('TestMetadata', key.kind())
self.assertEqual('m/b/suite/metric', key.id())
self.assertEqual(('TestMetadata', 'm/b/suite/metric'), key.flat())
def testTestMetadataKey_TestMetadata(self):
original_key = ndb.Key('TestMetadata', 'm/b/suite/metric')
key = utils.TestMetadataKey(original_key)
self.assertEqual(original_key, key)
def testTestMetadataKey_String(self):
key = utils.TestMetadataKey('m/b/suite/metric/page')
self.assertEqual('TestMetadata', key.kind())
self.assertEqual('m/b/suite/metric/page', key.id())
self.assertEqual(('TestMetadata', 'm/b/suite/metric/page'), key.flat())
def testOldStyleTestKey_None(self):
key = utils.OldStyleTestKey(None)
self.assertIsNone(key)
def testOldStyleTestKey_Test(self):
original_key = ndb.Key(
'Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
key = utils.OldStyleTestKey(original_key)
self.assertEqual(original_key, key)
def testOldStyleTestKey_TestMetadata(self):
key = utils.OldStyleTestKey(ndb.Key('TestMetadata', 'm/b/suite/metric'))
self.assertEqual('Test', key.kind())
self.assertEqual('metric', key.id())
self.assertEqual(
('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'),
key.flat())
def testOldStyleTestKey_String(self):
key = utils.OldStyleTestKey('m/b/suite/metric')
self.assertEqual('Test', key.kind())
self.assertEqual('metric', key.id())
self.assertEqual(
('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'),
key.flat())
def testTestSuiteName_Basic(self):
key = utils.TestKey('Master/bot/suite-foo/sub/x/y/z')
self.assertEqual('suite-foo', utils.TestSuiteName(key))
def testMinimumRange_Empty_ReturnsNone(self):
self.assertIsNone(utils.MinimumRange([]))
def testMinimumRange_NotOverlapping_ReturnsNone(self):
self.assertIsNone(utils.MinimumRange([(5, 10), (15, 20)]))
def testMinimumRange_OneRange_ReturnsSameRange(self):
self.assertEqual((5, 10), utils.MinimumRange([(5, 10)]))
def testMinimumRange_OverlapsForOneNumber_ReturnsRangeWithOneNumber(self):
self.assertEqual((5, 5), utils.MinimumRange([(2, 5), (5, 10)]))
def testMinimumRange_MoreThanTwoRanges_ReturnsIntersection(self):
self.assertEqual((6, 14), utils.MinimumRange(
[(3, 20), (5, 15), (6, 25), (3, 14)]))
def testValidate_StringNotInOptionList_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
['completed', 'pending', 'failed'], 'running')
def testValidate_InvalidType_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(int, 'a string')
def testValidate_MissingProperty_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'status': str, 'try_job_id': int, 'required_property': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_InvalidTypeInDict_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'status': int, 'try_job_id': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_StringNotInNestedOptionList_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {'nested_values': 'apple'}})
def testValidate_MissingPropertyInNestedDict_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {}})
def testValidate_ExpectedValueIsNone_Passes(self):
utils.Validate(None, 'running')
def testValidate_StringInOptionList_Passes(self):
utils.Validate(str, 'a string')
def testValidate_HasExpectedProperties_Passes(self):
utils.Validate(
{'status': str, 'try_job_id': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_StringInNestedOptionList_Passes(self):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {'nested_values': 'orange'}})
def testValidate_TypeConversion_Passes(self):
utils.Validate([1], '1')
def testGetBuildDetailsFromStdioLink_InvalidLink(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink(
'[Buildbot stdio](http://notquite/builders/whatever/234)')
self.assertIsNone(base_url)
self.assertIsNone(master)
self.assertIsNone(bot)
self.assertIsNone(number)
self.assertIsNone(step)
def testGetBuildDetailsFromStdioLink(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual('https://build.chromium.org/p/chromium.perf/builders/',
base_url)
self.assertEqual('chromium.perf', master)
self.assertEqual('Android One Perf (2)', bot)
self.assertEqual('5365', number)
self.assertEqual('blink_style.top_25', step)
def testGetBuildDetailsFromStdioLink_DifferentBaseUrl(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink((
'[Buildbot stdio]('
'https://uberchromegw.corp.google.com/i/new.master/builders/Builder/'
'builds/3486/steps/new_test/logs/stdio)'))
self.assertEqual(
'https://uberchromegw.corp.google.com/i/new.master/builders/',
base_url)
self.assertEqual('new.master', master)
self.assertEqual('Builder', bot)
self.assertEqual('3486', number)
self.assertEqual('new_test', step)
def testGetBuildbotStatusPageUriFromStdioLink(self):
buildbot_status_page = utils.GetBuildbotStatusPageUriFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual((
'https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365'), buildbot_status_page)
def testGetLogdogLogUriFromStdioLink(self):
logdog_uri = utils.GetLogdogLogUriFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual((
'https://luci-logdog.appspot.com/v/?s='
'chrome%2Fbb%2Fchromium.perf%2FAndroid_One_Perf__2_%2F5365%2F%2B%2F'
'recipes%2Fsteps%2Fblink_style.top_25%2F0%2Fstdout'), logdog_uri)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('common.utils.discovery.build')
def testIsGroupMember_PositiveCase(self, mock_discovery_build):
mock_request = mock.MagicMock()
mock_request.execute = mock.MagicMock(return_value={'is_member': True})
mock_service = mock.MagicMock()
mock_service.membership = mock.MagicMock(
return_value=mock_request)
mock_discovery_build.return_value = mock_service
self.assertTrue(utils.IsGroupMember('foo@bar.com', 'group'))
mock_service.membership.assert_called_once_with(
identity='foo@bar.com', group='group')
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('logging.error')
@mock.patch('common.utils.discovery.build')
def testIsGroupMember_RequestFails_LogsErrorAndReturnsFalse(
self, mock_discovery_build, mock_logging_error):
mock_service = mock.MagicMock()
mock_service.membership = mock.MagicMock(
return_value={'error': 'Some error'})
mock_discovery_build.return_value = mock_service
self.assertFalse(utils.IsGroupMember('foo@bar.com', 'group'))
self.assertEqual(1, mock_logging_error.call_count)
def testGetSheriffForAutorollCommit_NotAutoroll_ReturnsNone(self):
self.assertIsNone(utils.GetSheriffForAutorollCommit(
'user@foo.org', 'TBR=donotreturnme@foo.org'))
self.assertIsNone(utils.GetSheriffForAutorollCommit(
'not-a-roll@foo.org', 'TBR=donotreturnme@foo.org'))
def testGetSheriffForAutorollCommit_AutoRoll_ReturnsSheriff(self):
self.assertEqual(
'sheriff@foo.org',
utils.GetSheriffForAutorollCommit(
'chromium-autoroll@skia-public.iam.gserviceaccount.com',
'This is a roll.\n\nTBR=sheriff@foo.org,bar@foo.org\n\n'
))
self.assertEqual(
'sheriff@v8.com',
utils.GetSheriffForAutorollCommit(
'v8-ci-autoroll-builder@'
'chops-service-accounts.iam.gserviceaccount.com',
'TBR=sheriff@v8.com'))
# Some alternative spellings for TBR.
self.assertEqual(
'sheriff@v8.com',
utils.GetSheriffForAutorollCommit(
'v8-ci-autoroll-builder@'
'chops-service-accounts.iam.gserviceaccount.com',
'TBR: sheriff@v8.com'))
self.assertEqual(
'sheriff@v8.com',
utils.GetSheriffForAutorollCommit(
'v8-ci-autoroll-builder@'
'chops-service-accounts.iam.gserviceaccount.com',
'Tbr: sheriff@v8.com'))
self.assertEqual(
'sheriff@v8.com',
utils.GetSheriffForAutorollCommit(
'v8-ci-autoroll-builder@'
'chops-service-accounts.iam.gserviceaccount.com',
'TBR= sheriff@v8.com'))
  @mock.patch.object(utils, 'GetEmail',
                     mock.MagicMock(return_value='admin@chromium.org'))
  def testIsAdministrator(self):
    # IsAdministrator keys off the current user's email; 'admin@chromium.org'
    # is presumably configured as an administrator in the test environment.
    self.assertTrue(utils.IsAdministrator())
  @mock.patch.object(utils, 'GetEmail',
                     mock.MagicMock(return_value='internal@chromium.org'))
  def testIsNotAdministrator(self):
    # A signed-in but non-admin account must not be reported as an admin.
    self.assertFalse(utils.IsAdministrator())
def _MakeMockFetch(base64_encoded=True, status=200):
  """Returns a mock fetch function that yields one canned JSON response.

  Args:
    base64_encoded: Whether to base64-encode the JSON body, mimicking
        services that wrap their payload.
    status: HTTP status code to attach to the fake response.
  """
  def _MockFetch(_):
    body = json.dumps({'key': 'this is well-formed JSON.'})
    if base64_encoded:
      body = base64.b64encode(body)
    return testing_common.FakeResponseObject(status, body)
  return _MockFetch
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.serialization import jsonutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceNUMACell(base.NovaObject,
                       base.NovaObjectDictCompat):
    """One NUMA cell of an instance: its vCPUs, memory and pinning state."""
    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    VERSION = '1.2'
    fields = {
        'id': obj_fields.IntegerField(),
        'cpuset': obj_fields.SetOfIntegersField(),
        'memory': obj_fields.IntegerField(),
        'pagesize': obj_fields.IntegerField(nullable=True),
        'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
                                               nullable=True),
        'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True)
    }
    # 'cpu_topology' appeared in 1.2; drop it when backlevelling to 1.0.
    obj_relationships = {
        'cpu_topology': [('1.2', '1.0')]
    }
    # Expose the raw pinning dict under the friendlier 'cpu_pinning' name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
    def __init__(self, **kwargs):
        """Create the cell, defaulting post-1.0 fields to None.

        Fields not supplied by the caller are set to None and cleared from
        the changed-fields list so they are not needlessly serialized.
        """
        super(InstanceNUMACell, self).__init__(**kwargs)
        if 'pagesize' not in kwargs:
            self.pagesize = None
            self.obj_reset_changes(['pagesize'])
        if 'cpu_topology' not in kwargs:
            self.cpu_topology = None
            self.obj_reset_changes(['cpu_topology'])
        if 'cpu_pinning' not in kwargs:
            self.cpu_pinning = None
            self.obj_reset_changes(['cpu_pinning_raw'])
    def __len__(self):
        # A cell's "length" is its number of vCPUs.
        return len(self.cpuset)
    def _to_dict(self):
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_to_dict_ to the future to avoid confusing.
        return {'cpus': hardware.format_cpu_spec(self.cpuset,
                                                 allow_ranges=False),
                'mem': {'total': self.memory},
                'id': self.id,
                'pagesize': self.pagesize}
    @classmethod
    def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_from_dict_ to the future to avoid confusing.
        cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
        memory = data_dict.get('mem', {}).get('total', 0)
        cell_id = data_dict.get('id')
        pagesize = data_dict.get('pagesize')
        return cls(id=cell_id, cpuset=cpuset,
                   memory=memory, pagesize=pagesize)
    @property
    def siblings(self):
        """Group this cell's vCPUs into sets of thread siblings.

        Yields nothing when no CPU topology is known or when there is only
        one thread per core (i.e. no siblings exist).
        """
        cpu_list = sorted(list(self.cpuset))
        threads = 0
        if self.cpu_topology:
            threads = self.cpu_topology.threads
        if threads == 1:
            # A single thread per core means no sibling groups at all.
            threads = 0
        # zip over N copies of one shared iterator chunks the sorted list
        # N CPUs at a time.
        return map(set, zip(*[iter(cpu_list)] * threads))
    def pin(self, vcpu, pcpu):
        """Record that guest CPU `vcpu` is pinned to host CPU `pcpu`."""
        if vcpu not in self.cpuset:
            # Silently ignore vCPUs that do not belong to this cell.
            return
        pinning_dict = self.cpu_pinning or {}
        pinning_dict[vcpu] = pcpu
        self.cpu_pinning = pinning_dict
    def pin_vcpus(self, *cpu_pairs):
        """Pin many (vcpu, pcpu) pairs in one call."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceNUMATopology(base.NovaObject,
                           base.NovaObjectDictCompat):
    """The full NUMA topology (list of cells) assigned to one instance."""
    # Version 1.0: Initial version
    # Version 1.1: Takes into account pagesize
    VERSION = '1.1'
    fields = {
        # NOTE(danms): The 'id' field is no longer used and should be
        # removed in the future when convenient
        'id': obj_fields.IntegerField(),
        'instance_uuid': obj_fields.UUIDField(),
        'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
    }
    obj_relationships = {
        'cells': [('1.0', '1.0')],
    }
    @classmethod
    def obj_from_primitive(cls, primitive):
        """Build a topology from either the object or the legacy dict format.

        The presence of the 'nova_object.name' key distinguishes a proper
        serialized NovaObject from the pre-object legacy representation.
        """
        if 'nova_object.name' in primitive:
            obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
                primitive)
        else:
            # NOTE(sahid): This compatibility code needs to stay until we can
            # guarantee that there are no cases of the old format stored in
            # the database (or forever, if we can never guarantee that).
            obj_topology = InstanceNUMATopology._from_dict(primitive)
            obj_topology.id = 0
        return obj_topology
    @classmethod
    def obj_from_db_obj(cls, instance_uuid, db_obj):
        """Deserialize the JSON blob stored in the instance_extra row."""
        primitive = jsonutils.loads(db_obj)
        obj_topology = cls.obj_from_primitive(primitive)
        if 'nova_object.name' not in db_obj:
            # Legacy-format rows did not record the uuid; backfill it here.
            obj_topology.instance_uuid = instance_uuid
        # No benefit to store a list of changed fields
        obj_topology.obj_reset_changes()
        return obj_topology
    # TODO(ndipanov) Remove this method on the major version bump to 2.0
    @base.remotable
    def create(self, context):
        self._save(context)
    # NOTE(ndipanov): We can't rename create and want to avoid version bump
    # as this needs to be backported to stable so this is not a @remotable
    # That's OK since we only call it from inside Instance.save() which is.
    def _save(self, context):
        # Persist the serialized topology into the instance_extra table.
        values = {'numa_topology': self._to_json()}
        db.instance_extra_update_by_uuid(context, self.instance_uuid,
                                         values)
        self.obj_reset_changes()
    # NOTE(ndipanov): We want to avoid version bump
    # as this needs to be backported to stable so this is not a @remotable
    # That's OK since we only call it from inside Instance.save() which is.
    @classmethod
    def delete_by_instance_uuid(cls, context, instance_uuid):
        """Null out the stored topology for the given instance."""
        values = {'numa_topology': None}
        db.instance_extra_update_by_uuid(context, instance_uuid,
                                         values)
    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        """Load the topology stored for an instance.

        :raises: NumaTopologyNotFound when the instance_extra row is missing.
        :returns: the topology object, or None when no topology was stored.
        """
        db_extra = db.instance_extra_get_by_instance_uuid(
            context, instance_uuid, columns=['numa_topology'])
        if not db_extra:
            raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
        if db_extra['numa_topology'] is None:
            return None
        return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
    def _to_json(self):
        # Serialize via the versioned-object primitive form.
        return jsonutils.dumps(self.obj_to_primitive())
    def __len__(self):
        """Defined so that boolean testing works the same as for lists."""
        return len(self.cells)
    def _to_dict(self):
        # NOTE(sahid): Used as legacy, could be renamed in _legacy_to_dict_
        # in the future to avoid confusing.
        return {'cells': [cell._to_dict() for cell in self.cells]}
    @classmethod
    def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy, could be renamed in _legacy_from_dict_
        # in the future to avoid confusing.
        return cls(cells=[
            InstanceNUMACell._from_dict(cell_dict)
            for cell_dict in data_dict.get('cells', [])])
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test case that utilizes httplib2 against the API server"""
import hashlib
import httplib2
from oslo.serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.utils import minimal_headers
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
class TestApi(functional.FunctionalTest):
"""Functional tests using httplib2 against the API server"""
    @skip_if_disabled
    def test_get_head_simple_post(self):
        """
        We test the following sequential series of actions:
        0. GET /images
        - Verify no public images
        1. GET /images/detail
        - Verify no public images
        2. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        3. HEAD image
        - Verify HTTP headers have correct information we just added
        4. GET image
        - Verify all information on image we just added is correct
        5. GET /images
        - Verify the image we just added is returned
        6. GET /images/detail
        - Verify the image we just added is returned
        7. PUT image with custom properties of "distro" and "arch"
        - Verify 200 returned
        8. PUT image with too many custom properties
        - Verify 413 returned
        9. GET image
        - Verify updated information about image was stored
        10. PUT image
        - Remove a previously existing property.
        11. PUT image
        - Add a previously deleted property.
        12. PUT image/members/member1
        - Add member1 to image
        13. PUT image/members/member2
        - Add member2 to image
        14. GET image/members
        - List image members
        15. DELETE image/members/member1
        - Delete image member1
        16. PUT image/members
        - Attempt to replace members with an overlimit amount
        17. PUT image/members/member11
        - Attempt to add a member while at limit
        18. POST /images with another public image named Image2
        - attribute and three custom properties, "distro", "arch" & "foo"
        - Verify a 200 OK is returned
        19. HEAD image2
        - Verify image2 found now
        20. GET /images
        - Verify 2 public images
        21. GET /images with filter on user-defined property "distro".
        - Verify both images are returned
        22. GET /images with filter on user-defined property 'distro' but
        - with non-existent value. Verify no images are returned
        23. GET /images with filter on non-existent user-defined property
        - "boo". Verify no images are returned
        24. GET /images with filter 'arch=i386'
        - Verify only image2 is returned
        25. GET /images with filter 'arch=x86_64'
        - Verify only image1 is returned
        26. GET /images with filter 'foo=bar'
        - Verify only image2 is returned
        27. DELETE image1
        - Delete image
        28. GET image/members
        - List deleted image members
        29. PUT image/members/member2
        - Update existing member2 of deleted image
        30. PUT image/members/member3
        - Add member3 to deleted image
        31. DELETE image/members/member2
        - Delete member2 from deleted image
        32. DELETE image2
        - Delete image
        33. GET /images
        - Verify no images are listed
        """
        # Start fresh servers configured from this test's attributes.
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        # 0. GET /images
        # Verify no public images
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 1. GET /images/detail
        # Verify no public images
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 2. POST /images with public image named Image1
        # attribute and no custom properties. Verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        # The server-computed checksum must be the md5 of the uploaded body.
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        # 3. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        # 4. GET image
        # Verify all information on image we just added is correct
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image_headers = {
            'x-image-meta-id': image_id,
            'x-image-meta-name': 'Image1',
            'x-image-meta-is_public': 'True',
            'x-image-meta-status': 'active',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'x-image-meta-size': str(FIVE_KB)}
        expected_std_headers = {
            'content-length': str(FIVE_KB),
            'content-type': 'application/octet-stream'}
        for expected_key, expected_value in expected_image_headers.items():
            self.assertEqual(expected_value, response[expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        for expected_key, expected_value in expected_std_headers.items():
            self.assertEqual(expected_value, response[expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        self.assertEqual("*" * FIVE_KB, content)
        self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                         hashlib.md5(content).hexdigest())
        # 5. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(expected_result, jsonutils.loads(content))
        # 6. GET /images/detail
        # Verify image and all its metadata
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 7. PUT image with custom properties of "distro" and "arch"
        # Verify 200 returned
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("x86_64", data['image']['properties']['arch'])
        self.assertEqual("Ubuntu", data['image']['properties']['distro'])
        # 8. PUT image with too many custom properties
        # Verify 413 returned
        headers = {}
        for i in range(11):  # configured limit is 10
            headers['X-Image-Meta-Property-foo%d' % i] = 'bar'
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(413, response.status)
        # 9. GET /images/detail
        # Verify image and all its metadata
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 10. PUT image and remove a previously existing property.
        headers = {'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(1, len(data['properties']))
        self.assertEqual("x86_64", data['properties']['arch'])
        # 11. PUT image and add a previously deleted property.
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(2, len(data['properties']))
        self.assertEqual("x86_64", data['properties']['arch'])
        self.assertEqual("Ubuntu", data['properties']['distro'])
        # Updating properties must also bump the updated_at timestamp.
        self.assertNotEqual(data['created_at'], data['updated_at'])
        # 12. Add member to image
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(204, response.status)
        # 13. Add member to image
        path = ("http://%s:%d/v1/images/%s/members/pattiewhite" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(204, response.status)
        # 14. List image members
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['members']))
        self.assertEqual('pattieblack', data['members'][0]['member_id'])
        self.assertEqual('pattiewhite', data['members'][1]['member_id'])
        # 15. Delete image member
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(204, response.status)
        # 16. Attempt to replace members with an overlimit amount
        # Adding 11 image members should fail since configured limit is 10
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        memberships = []
        for i in range(11):
            member_id = "foo%d" % i
            memberships.append(dict(member_id=member_id))
        http = httplib2.Http()
        body = jsonutils.dumps(dict(memberships=memberships))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(413, response.status)
        # 17. Attempt to add a member while at limit
        # Adding an 11th member should fail since configured limit is 10
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        memberships = []
        for i in range(10):
            member_id = "foo%d" % i
            memberships.append(dict(member_id=member_id))
        http = httplib2.Http()
        body = jsonutils.dumps(dict(memberships=memberships))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(204, response.status)
        path = ("http://%s:%d/v1/images/%s/members/fail_me" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(413, response.status)
        # 18. POST /images with another public image named Image2
        # attribute and three custom properties, "distro", "arch" & "foo".
        # Verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image2')
        headers['X-Image-Meta-Property-Distro'] = 'Ubuntu'
        headers['X-Image-Meta-Property-Arch'] = 'i386'
        headers['X-Image-Meta-Property-foo'] = 'bar'
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image2_id = data['image']['id']
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image2", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        self.assertEqual('Ubuntu', data['image']['properties']['distro'])
        self.assertEqual('i386', data['image']['properties']['arch'])
        self.assertEqual('bar', data['image']['properties']['foo'])
        # 19. HEAD image2
        # Verify image2 found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image2_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image2", response['x-image-meta-name'])
        # 20. GET /images
        # Verify 2 public images
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        # Listings are newest-first: image2 precedes image1.
        images = jsonutils.loads(content)['images']
        self.assertEqual(2, len(images))
        self.assertEqual(image2_id, images[0]['id'])
        self.assertEqual(image_id, images[1]['id'])
        # 21. GET /images with filter on user-defined property 'distro'.
        # Verify both images are returned
        path = "http://%s:%d/v1/images?property-distro=Ubuntu" % (
            "127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(2, len(images))
        self.assertEqual(image2_id, images[0]['id'])
        self.assertEqual(image_id, images[1]['id'])
        # 22. GET /images with filter on user-defined property 'distro' but
        # with non-existent value. Verify no images are returned
        path = "http://%s:%d/v1/images?property-distro=fedora" % (
            "127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(0, len(images))
        # 23. GET /images with filter on non-existent user-defined property
        # 'boo'. Verify no images are returned
        path = "http://%s:%d/v1/images?property-boo=bar" % ("127.0.0.1",
                                                            self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(0, len(images))
        # 24. GET /images with filter 'arch=i386'
        # Verify only image2 is returned
        path = "http://%s:%d/v1/images?property-arch=i386" % ("127.0.0.1",
                                                              self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image2_id, images[0]['id'])
        # 25. GET /images with filter 'arch=x86_64'
        # Verify only image1 is returned
        path = "http://%s:%d/v1/images?property-arch=x86_64" % ("127.0.0.1",
                                                                self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_id, images[0]['id'])
        # 26. GET /images with filter 'foo=bar'
        # Verify only image2 is returned
        path = "http://%s:%d/v1/images?property-foo=bar" % ("127.0.0.1",
                                                            self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image2_id, images[0]['id'])
        # 27. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        # 28. Try to list members of deleted image
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(404, response.status)
        # 29. Try to update member of deleted image
        path = ("http://%s:%d/v1/images/%s/members" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
        body = jsonutils.dumps(dict(memberships=fixture))
        response, content = http.request(path, 'PUT', body=body)
        self.assertEqual(404, response.status)
        # 30. Try to add member to deleted image
        path = ("http://%s:%d/v1/images/%s/members/chickenpattie" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(404, response.status)
        # 31. Try to delete member of deleted image
        path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
                ("127.0.0.1", self.api_port, image_id))
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(404, response.status)
        # 32. DELETE image2
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image2_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        # 33. GET /images
        # Verify no images are listed
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(0, len(images))
        # 34. HEAD /images/detail
        # HEAD is not supported on the detail resource; only GET is allowed.
        path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(405, response.status)
        self.assertEqual('GET', response.get('allow'))
        self.stop_servers()
    def test_download_non_exists_image_raises_http_forbidden(self):
        """
        We test the following sequential series of actions:
        0. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        1. HEAD image
        - Verify HTTP headers have correct information we just added
        2. GET image
        - Verify all information on image we just added is correct
        3. DELETE image1
        - Delete the newly added image
        4. GET image
        - Verify that 403 HTTPForbidden exception is raised prior to
        404 HTTPNotFound
        """
        # Start fresh servers configured from this test's attributes.
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        # Server-computed checksum must match the md5 of the uploaded body.
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        # 1. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        # 2. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(expected_result, jsonutils.loads(content))
        # 3. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        # 4. GET image
        # Verify that 403 HTTPForbidden exception is raised prior to
        # 404 HTTPNotFound
        # The '!' policy rule denies download_image for everyone, so the
        # policy check must fire before the not-found lookup.
        rules = {"download_image": '!'}
        self.set_policy_rules(rules)
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(403, response.status)
        self.stop_servers()
    def test_download_non_exists_image_raises_http_not_found(self):
        """
        We test the following sequential series of actions:
        0. POST /images with public image named Image1
        and no custom properties
        - Verify 201 returned
        1. HEAD image
        - Verify HTTP headers have correct information we just added
        2. GET image
        - Verify all information on image we just added is correct
        3. DELETE image1
        - Delete the newly added image
        4. GET image
        - Verify that 404 HTTPNotFound exception is raised
        """
        # Start fresh servers configured from this test's attributes.
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        # Server-computed checksum must match the md5 of the uploaded body.
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        # 1. HEAD image
        # Verify image found now
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        # 2. GET /images
        # Verify one public image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(expected_result, jsonutils.loads(content))
        # 3. DELETE image1
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        # 4. GET image
        # Verify that 404 HTTPNotFound exception is raised
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(404, response.status)
        self.stop_servers()
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
from testing_utils import testing
from components import auth
from components import auth_testing
from components import utils
from cipd import acl
from cipd import api
from cipd import impl
class PackageRepositoryApiTest(testing.EndpointsTestCase):
"""Tests for API layer ONLY."""
maxDiff = None
api_service_cls = api.PackageRepositoryApi
  def setUp(self):
    super(PackageRepositoryApiTest, self).setUp()
    # Run every test as a mocked identity with admin rights, backed by an
    # in-memory repo service instead of the real implementation.
    auth_testing.mock_get_current_identity(self)
    auth_testing.mock_is_admin(self)
    self.repo_service = MockedRepoService()
    self.mock(impl, 'get_repo_service', lambda: self.repo_service)
  def register_fake_instance(self, pkg_name):
    """Registers a fake instance (id 'aaa...a') of the given package."""
    _, registered = self.repo_service.register_instance(
        package_name=pkg_name,
        instance_id='a'*40,
        caller=auth.Identity.from_bytes('user:abc@example.com'),
        now=datetime.datetime(2014, 1, 1, 0, 0))
    self.assertTrue(registered)
def test_fetch_package_ok(self):
self.register_fake_instance('good/name')
resp = self.call_api('fetch_package', {'package_name': 'good/name'})
self.assertEqual({
'package': {
'package_name': 'good/name',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
'status': 'SUCCESS',
}, resp.json_body)
def test_fetch_package_no_access(self):
self.register_fake_instance('good/name')
self.mock(api.acl, 'can_fetch_package', lambda *_: False)
with self.call_should_fail(403):
self.call_api('fetch_package', {'package_name': 'good/name'})
def test_fetch_package_no_such_package(self):
resp = self.call_api('fetch_package', {'package_name': 'good/name'})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_fetch_package_bad_name(self):
resp = self.call_api('fetch_package', {'package_name': 'bad name'})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package name',
}, resp.json_body)
def test_list_packages_no_results(self):
resp = self.call_api('list_packages', {})
self.assertEqual({
'status': 'SUCCESS',
}, resp.json_body)
def test_list_packages_all_packages(self):
self.register_fake_instance('good/name')
resp = self.call_api('list_packages', {'recursive': True})
self.assertEqual({
'status': 'SUCCESS',
'packages': ['good/name'],
'directories': ['good'],
}, resp.json_body)
def test_list_packages_filter_no_access(self):
self.register_fake_instance('good/name')
self.mock(api.acl, 'can_fetch_package', lambda *_: False)
resp = self.call_api('list_packages', {})
self.assertEqual({
'status': 'SUCCESS',
}, resp.json_body)
def test_list_packages_in_path(self):
self.register_fake_instance('p/a')
self.register_fake_instance('p/y')
self.register_fake_instance('p/z/z')
self.register_fake_instance('pp')
self.register_fake_instance('q')
resp = self.call_api('list_packages', {
'path': 'p',
'recursive': False,
})
self.assertEqual({
'status': 'SUCCESS',
'packages': [
'p/a',
'p/y',
],
'directories': [
'p/z',
],
}, resp.json_body)
resp = self.call_api('list_packages', {
'path': 'p',
'recursive': True,
})
self.assertEqual({
'status': 'SUCCESS',
'packages': [
'p/a',
'p/y',
'p/z/z',
],
'directories': [
'p/z',
],
}, resp.json_body)
def test_fetch_instance_ok(self):
inst, registered = self.repo_service.register_instance(
package_name='good/name',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
resp = self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
self.assertEqual({
'fetch_url': 'http://signed-url/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'instance': {
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'good/name',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
'status': 'SUCCESS',
}, resp.json_body)
# Add some fake processors, ensure they appear in the output.
inst.processors_pending = ['pending1', 'pending2']
inst.processors_success = ['success1', 'success2']
inst.processors_failure = ['failure1', 'failure2']
inst.put()
resp = self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
self.assertEqual({
'fetch_url': 'http://signed-url/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'instance': {
'instance_id': u'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': u'good/name',
'registered_by': u'user:abc@example.com',
'registered_ts': u'1388534400000000',
},
'processors': [
{'status': 'PENDING', 'name': 'pending1'},
{'status': 'PENDING', 'name': 'pending2'},
{'status': 'SUCCESS', 'name': 'success1'},
{'status': 'SUCCESS', 'name': 'success2'},
{'status': 'FAILURE', 'name': 'failure1'},
{'status': 'FAILURE', 'name': 'failure2'},
],
'status': 'SUCCESS',
}, resp.json_body)
def test_fetch_instance_no_access(self):
_, registered = self.repo_service.register_instance(
package_name='good/name',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
self.mock(api.acl, 'can_fetch_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
def test_fetch_instance_no_such_package(self):
resp = self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_fetch_instance_no_such_instance(self):
_, registered = self.repo_service.register_instance(
package_name='good/name',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
resp = self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'b'*40,
})
self.assertEqual({'status': 'INSTANCE_NOT_FOUND'}, resp.json_body)
def test_fetch_instance_bad_name(self):
resp = self.call_api('fetch_instance', {
'package_name': 'bad name',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package name',
}, resp.json_body)
def test_fetch_instance_bad_instance_id(self):
resp = self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'bad instance id',
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package instance ID',
}, resp.json_body)
def test_fetch_instance_no_service(self):
self.repo_service = None
with self.call_should_fail(500):
self.call_api('fetch_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
def test_register_new_instance_flow(self):
self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1))
request = {
'package_name': 'good/name',
'instance_id': 'a'*40,
}
# Package is not uploaded yet. Should ask to upload.
resp = self.call_api('register_instance', request)
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'UPLOAD_FIRST',
'upload_session_id': 'upload_session_id',
'upload_url': 'http://upload_url',
}, resp.json_body)
# Pretend it is upload now.
self.repo_service.uploaded.add('a'*40)
# Should register the package.
resp = self.call_api('register_instance', request)
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'REGISTERED',
'instance': {
'instance_id': 'a'*40,
'package_name': 'good/name',
'registered_by': 'user:mocked@example.com',
'registered_ts': '1388534400000000',
},
}, resp.json_body)
# Check that it is indeed there.
pkg = self.repo_service.get_instance('good/name', 'a'*40)
self.assertTrue(pkg)
expected = {
'registered_by': auth.Identity(kind='user', name='mocked@example.com'),
'registered_ts': datetime.datetime(2014, 1, 1, 0, 0),
'processors_failure': [],
'processors_pending': [],
'processors_success': [],
}
self.assertEqual(expected, pkg.to_dict())
# Attempt to register it again.
resp = self.call_api('register_instance', request)
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'ALREADY_REGISTERED',
'instance': {
'instance_id': 'a'*40,
'package_name': 'good/name',
'registered_by': 'user:mocked@example.com',
'registered_ts': '1388534400000000',
},
}, resp.json_body)
def test_register_instance_bad_name(self):
resp = self.call_api('register_instance', {
'package_name': 'bad name',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package name',
}, resp.json_body)
def test_register_instance_bad_instance_id(self):
resp = self.call_api('register_instance', {
'package_name': 'good/name',
'instance_id': 'bad instance id',
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package instance ID',
}, resp.json_body)
def test_register_instance_no_access(self):
self.mock(api.acl, 'can_register_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('register_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
def test_register_instance_no_service(self):
self.repo_service = None
with self.call_should_fail(500):
self.call_api('register_instance', {
'package_name': 'good/name',
'instance_id': 'a'*40,
})
def test_fetch_acl_ok(self):
acl.modify_roles(
changes=[
acl.RoleChange(
package_path='a',
revoke=False,
role='OWNER',
user=auth.Identity.from_bytes('user:xyz@example.com'),
group=None),
acl.RoleChange(
package_path='a/b/c',
revoke=False,
role='READER',
user=None,
group='reader-group'),
],
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
resp = self.call_api('fetch_acl', {'package_path': 'a/b/c/d'})
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'SUCCESS',
'acls': {
'acls': [
{
'modified_by': 'user:abc@example.com',
'modified_ts': '1388534400000000',
'package_path': 'a',
'principals': ['user:xyz@example.com'],
'role': 'OWNER',
},
{
'modified_by': 'user:abc@example.com',
'modified_ts': '1388534400000000',
'package_path': 'a/b/c',
'principals': ['group:reader-group'],
'role': 'READER',
},
],
},
}, resp.json_body)
def test_fetch_acl_missing(self):
resp = self.call_api('fetch_acl', {'package_path': 'a/b/c/d'})
self.assertEqual(200, resp.status_code)
self.assertEqual({'status': 'SUCCESS', 'acls': {}}, resp.json_body)
def test_fetch_acl_bad_package_name(self):
resp = self.call_api('fetch_acl', {'package_path': 'bad name'})
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package path',
}, resp.json_body)
def test_fetch_acl_no_access(self):
self.mock(api.acl, 'can_fetch_acl', lambda *_: False)
with self.call_should_fail(403):
self.call_api('fetch_acl', {'package_path': 'a/b/c'})
def test_modify_acl_ok(self):
self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1))
resp = self.call_api('modify_acl', {
'package_path': 'a/b',
'changes': [
{
'action': 'GRANT',
'role': 'OWNER',
'principal': 'user:abc@example.com',
},
{
'action': 'GRANT',
'role': 'READER',
'principal': 'group:readers-group',
},
{
'action': 'REVOKE',
'role': 'WRITER',
'principal': 'anonymous:anonymous',
},
],
})
self.assertEqual(200, resp.status_code)
self.assertEqual({'status': 'SUCCESS'}, resp.json_body)
owner = acl.get_package_acls('a/b/c', 'OWNER')
self.assertEqual(1, len(owner))
self.assertEqual({
'groups': [],
'modified_by': auth.Identity(kind='user', name='mocked@example.com'),
'modified_ts': datetime.datetime(2014, 1, 1, 0, 0),
'rev': 1,
'users': [auth.Identity(kind='user', name='abc@example.com')],
}, owner[0].to_dict())
reader = acl.get_package_acls('a/b/c', 'READER')
self.assertEqual(1, len(reader))
self.assertEqual({
'groups': ['readers-group'],
'modified_by': auth.Identity(kind='user', name='mocked@example.com'),
'modified_ts': datetime.datetime(2014, 1, 1, 0, 0),
'rev': 1,
'users': [],
}, reader[0].to_dict())
def test_modify_acl_bad_role(self):
resp = self.call_api('modify_acl', {
'package_path': 'a/b',
'changes': [
{
'action': 'GRANT',
'role': 'UNKNOWN_ROLE',
'principal': 'user:abc@example.com',
},
],
})
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid role change request: Invalid role UNKNOWN_ROLE',
}, resp.json_body)
def test_modify_acl_bad_group_name(self):
resp = self.call_api('modify_acl', {
'package_path': 'a/b',
'changes': [
{
'action': 'GRANT',
'role': 'OWNER',
'principal': 'group:bad/group/name',
},
],
})
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'ERROR',
'error_message': (
'Invalid role change request: Invalid group name: "bad/group/name"'),
}, resp.json_body)
def test_modify_acl_bad_package_name(self):
resp = self.call_api('modify_acl', {
'package_path': 'bad name',
'changes': [],
})
self.assertEqual(200, resp.status_code)
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package path',
}, resp.json_body)
def test_modify_acl_no_access(self):
self.mock(api.acl, 'can_modify_acl', lambda *_: False)
with self.call_should_fail(403):
self.call_api('modify_acl', {
'package_path': 'a/b/c',
'changes': [],
})
def test_fetch_client_binary_ok(self):
_, registered = self.repo_service.register_instance(
package_name='infra/tools/cipd/linux-amd64',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
# Mock get_client_binary_info. It is tested separately in impl_test.py.
def mocked_get_info(instance):
self.assertEqual('infra/tools/cipd/linux-amd64', instance.package_name)
self.assertEqual('a'*40, instance.instance_id)
return client_binary_info_response
self.mock(self.repo_service, 'get_client_binary_info', mocked_get_info)
# None, None -> still processing.
client_binary_info_response = None, None
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'a'*40,
})
self.assertEqual({
'instance': {
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'infra/tools/cipd/linux-amd64',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
'status': 'NOT_EXTRACTED_YET',
}, resp.json_body)
# Error message.
client_binary_info_response = None, 'Some error message'
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Some error message',
}, resp.json_body)
# Successfully extracted.
client_binary_info_response = impl.ClientBinaryInfo(
sha1='b'*40,
size=123,
fetch_url='https://client_url'), None
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'a'*40,
})
self.assertEqual({
'client_binary': {
'fetch_url': 'https://client_url',
'sha1': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
'size': '123',
},
'instance': {
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'infra/tools/cipd/linux-amd64',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
'status': 'SUCCESS',
}, resp.json_body)
def test_fetch_client_binary_no_access(self):
_, registered = self.repo_service.register_instance(
package_name='infra/tools/cipd/linux-amd64',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
# Should return PACKAGE_NOT_FOUND even though package exists.
self.mock(api.acl, 'can_fetch_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'a'*40,
})
def test_fetch_client_binary_no_such_package(self):
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'a'*40,
})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_fetch_client_binary_no_such_instance(self):
_, registered = self.repo_service.register_instance(
package_name='infra/tools/cipd/linux-amd64',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
self.assertTrue(registered)
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'b'*40,
})
self.assertEqual({'status': 'INSTANCE_NOT_FOUND'}, resp.json_body)
def test_fetch_client_binary_bad_name(self):
resp = self.call_api('fetch_client_binary', {
'package_name': 'bad name',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package name',
}, resp.json_body)
def test_fetch_client_binary_not_a_client(self):
resp = self.call_api('fetch_client_binary', {
'package_name': 'good/name/not/a/client',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Not a CIPD client package',
}, resp.json_body)
def test_fetch_client_binary_bad_instance_id(self):
resp = self.call_api('fetch_client_binary', {
'package_name': 'infra/tools/cipd/linux-amd64',
'instance_id': 'bad instance id',
})
self.assertEqual({
'status': 'ERROR',
'error_message': 'Invalid package instance ID',
}, resp.json_body)
def register_mock_instance(self):
return self.repo_service.register_instance(
package_name='a/b',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))[0]
def test_set_ref_ok(self):
self.register_mock_instance()
self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1))
resp = self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'ref',
'instance_id': 'a'*40,
})
self.assertEqual({
'ref': {
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'modified_by': 'user:mocked@example.com',
'modified_ts': '1388534400000000',
},
'status': 'SUCCESS',
}, resp.json_body)
def test_set_ref_bad_ref(self):
self.register_mock_instance()
resp = self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'BAD REF NAME',
'instance_id': 'a'*40,
})
self.assertEqual({
'error_message': 'Invalid package ref name',
'status': 'ERROR',
}, resp.json_body)
def test_set_ref_no_access(self):
self.register_mock_instance()
self.mock(api.acl, 'can_move_ref', lambda *_: False)
with self.call_should_fail(403):
self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'ref',
'instance_id': 'a'*40,
})
def test_set_ref_no_package(self):
resp = self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'ref',
'instance_id': 'a'*40,
})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_set_ref_no_instance(self):
self.register_mock_instance()
resp = self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'ref',
'instance_id': 'b'*40,
})
self.assertEqual({'status': 'INSTANCE_NOT_FOUND'}, resp.json_body)
def test_set_ref_not_ready(self):
inst = self.register_mock_instance()
inst.processors_pending = ['proc']
inst.put()
resp = self.call_api('set_ref', {
'package_name': 'a/b',
'ref': 'ref',
'instance_id': 'a'*40,
})
self.assertEqual({
'error_message': 'Pending processors: proc',
'status': 'PROCESSING_NOT_FINISHED_YET',
}, resp.json_body)
def set_tag(self, pkg, tag, ts, instance_id='a'*40):
self.repo_service.register_instance(
package_name=pkg,
instance_id=instance_id,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=ts)
self.repo_service.attach_tags(
package_name=pkg,
instance_id=instance_id,
tags=[tag],
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=ts)
def test_fetch_tags_all(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
self.set_tag('a/b', 'tag2:', datetime.datetime(2015, 1, 1))
resp = self.call_api('fetch_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
})
self.assertEqual({
'status': u'SUCCESS',
'tags': [
{
'registered_by': 'user:abc@example.com',
'registered_ts': '1420070400000000',
'tag': 'tag2:',
},
{
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
'tag': 'tag1:',
},
],
}, resp.json_body)
def test_fetch_tags_some(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
self.set_tag('a/b', 'tag2:', datetime.datetime(2015, 1, 1))
resp = self.call_api('fetch_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tag': ['tag1:', 'missing:'],
})
self.assertEqual({
'status': u'SUCCESS',
'tags': [
{
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
'tag': 'tag1:',
},
],
}, resp.json_body)
def test_fetch_tags_no_access(self):
self.mock(api.acl, 'can_fetch_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('fetch_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tag': ['tag1:', 'missing:'],
})
def test_fetch_tags_no_package(self):
resp = self.call_api('fetch_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tag': ['tag1:', 'missing:'],
})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_attach_tags_ok(self):
self.mock(utils, 'utcnow', lambda: datetime.datetime(2015, 1, 1))
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
resp = self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tags': ['tag1:', 'tag2:'],
})
self.assertEqual({
'status': u'SUCCESS',
'tags': [
{
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
'tag': 'tag1:',
},
{
'registered_by': 'user:mocked@example.com',
'registered_ts': '1420070400000000',
'tag': 'tag2:',
},
],
}, resp.json_body)
def test_attach_tags_no_tags(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
resp = self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
})
self.assertEqual({
'error_message': 'Tag list is empty',
'status': 'ERROR',
}, resp.json_body)
def test_attach_tags_bad_tag(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
resp = self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tags': ['BAD_TAG'],
})
self.assertEqual({
'error_message': 'Invalid tag "BAD_TAG"',
'status': 'ERROR',
}, resp.json_body)
def test_attach_tags_no_access(self):
self.mock(api.acl, 'can_attach_tag', lambda *_: False)
with self.call_should_fail(403):
self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tags': ['tag1:'],
})
def test_attach_tags_failed_proc(self):
inst, _ = self.repo_service.register_instance(
package_name='a/b',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
inst.processors_failure = ['failed proc']
inst.put()
resp = self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tags': ['tag1:'],
})
self.assertEqual({
'error_message': 'Failed processors: failed proc',
'status': 'PROCESSING_FAILED',
}, resp.json_body)
def test_attach_tags_pending_proc(self):
inst, _ = self.repo_service.register_instance(
package_name='a/b',
instance_id='a'*40,
caller=auth.Identity.from_bytes('user:abc@example.com'),
now=datetime.datetime(2014, 1, 1))
inst.processors_pending = ['pending proc']
inst.put()
resp = self.call_api('attach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tags': ['tag1:'],
})
self.assertEqual({
'error_message': 'Pending processors: pending proc',
'status': 'PROCESSING_NOT_FINISHED_YET',
}, resp.json_body)
def test_detach_tags_ok(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1))
resp = self.call_api('detach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tag': ['tag1:', 'tag2:'],
})
self.assertEqual({'status': 'SUCCESS'}, resp.json_body)
def test_detach_tags_no_access(self):
self.mock(api.acl, 'can_detach_tag', lambda *_: False)
with self.call_should_fail(403):
self.call_api('detach_tags', {
'package_name': 'a/b',
'instance_id': 'a'*40,
'tag': ['tag1:', 'tag2:'],
})
def test_search_in_single_package(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2015, 1, 1), 'b'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2016, 1, 1), 'c'*40)
self.set_tag('a/b', 'tag2:', datetime.datetime(2016, 1, 1), 'a'*40)
self.set_tag('d/e', 'tag1:', datetime.datetime(2017, 1, 1), 'a'*40)
resp = self.call_api('search_instances', {
'tag': 'tag1:',
'package_name': 'a/b',
})
self.assertEqual({
'instances': [
{
'instance_id': 'cccccccccccccccccccccccccccccccccccccccc',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1451606400000000',
},
{
'instance_id': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1420070400000000',
},
{
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
],
'status': 'SUCCESS',
}, resp.json_body)
def test_search_globally(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2015, 1, 1), 'b'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2016, 1, 1), 'c'*40)
self.set_tag('a/b', 'tag2:', datetime.datetime(2016, 1, 1), 'a'*40)
self.set_tag('d/e', 'tag1:', datetime.datetime(2017, 1, 1), 'a'*40)
resp = self.call_api('search_instances', {'tag': 'tag1:'})
self.assertEqual({
'instances': [
{
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'd/e',
'registered_by': 'user:abc@example.com',
'registered_ts': '1483228800000000',
},
{
'instance_id': 'cccccccccccccccccccccccccccccccccccccccc',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1451606400000000',
},
{
'instance_id': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1420070400000000',
},
{
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'a/b',
'registered_by': 'user:abc@example.com',
'registered_ts': '1388534400000000',
},
],
'status': 'SUCCESS',
}, resp.json_body)
def test_search_no_access_single_pkg(self):
self.mock(api.acl, 'can_fetch_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('search_instances', {
'tag': 'tag1:',
'package_name': 'a/b',
})
def test_search_no_access_globally(self):
def mocked_can_fetch_instance(pkg, _ident):
return pkg == 'd/e'
self.mock(api.acl, 'can_fetch_instance', mocked_can_fetch_instance)
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2015, 1, 1), 'b'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2016, 1, 1), 'c'*40)
self.set_tag('a/b', 'tag2:', datetime.datetime(2016, 1, 1), 'a'*40)
self.set_tag('d/e', 'tag1:', datetime.datetime(2017, 1, 1), 'a'*40)
resp = self.call_api('search_instances', {'tag': 'tag1:'})
self.assertEqual({
'instances': [
{
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'package_name': 'd/e',
'registered_by': 'user:abc@example.com',
'registered_ts': '1483228800000000',
},
],
'status': 'SUCCESS',
}, resp.json_body)
def test_resolve_version_works_instance_id(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'a'*40,
})
self.assertEqual({
'instance_id': 'a'*40,
'status': 'SUCCESS',
}, resp.json_body)
def test_resolve_version_works_ref(self):
self.register_mock_instance()
self.repo_service.set_package_ref(
'a/b', 'ref', 'a'*40, auth.Identity.from_bytes('user:abc@example.com'))
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'ref',
})
self.assertEqual({
'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'status': 'SUCCESS',
}, resp.json_body)
def test_resolve_version_works_tag(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'tag1:',
})
self.assertEqual({
'instance_id': 'a'*40,
'status': 'SUCCESS',
}, resp.json_body)
def test_resolve_version_not_valid_version(self):
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'NOT A VALID VERSION',
})
self.assertEqual({
'error_message': 'Not a valid instance ID or tag: "NOT A VALID VERSION"',
'status': 'ERROR',
}, resp.json_body)
def test_resolve_version_no_access(self):
self.mock(api.acl, 'can_fetch_instance', lambda *_: False)
with self.call_should_fail(403):
self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'tag1:',
})
def test_resolve_version_no_package(self):
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'tag1:',
})
self.assertEqual({'status': 'PACKAGE_NOT_FOUND'}, resp.json_body)
def test_resolve_version_no_instance_id(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'b'*40,
})
self.assertEqual({'status': 'INSTANCE_NOT_FOUND'}, resp.json_body)
def test_resolve_version_no_tag(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'tag2:',
})
self.assertEqual({'status': 'INSTANCE_NOT_FOUND'}, resp.json_body)
def test_resolve_version_ambigious_tag(self):
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'a'*40)
self.set_tag('a/b', 'tag1:', datetime.datetime(2014, 1, 1), 'b'*40)
resp = self.call_api('resolve_version', {
'package_name': 'a/b',
'version': 'tag1:',
})
self.assertEqual({
'error_message': 'More than one instance has tag "tag1:" set',
'status': 'AMBIGUOUS_VERSION',
}, resp.json_body)
class MockedRepoService(impl.RepoService):
  """Real RepoService logic with the CAS-dependent bits stubbed out.

  Instance IDs added to |uploaded| are treated as present in the CAS store.
  """

  def __init__(self):
    super(MockedRepoService, self).__init__(None)
    # IDs of instance files pretended to be uploaded to the CAS store.
    self.uploaded = set()

  def is_fetch_configured(self):
    """Fetching is always considered configured in tests."""
    return True

  def generate_fetch_url(self, instance):
    """Returns a deterministic fake signed URL derived from the instance ID."""
    return 'http://signed-url/' + instance.instance_id

  def is_instance_file_uploaded(self, package_name, instance_id):
    """True if the instance ID was marked as uploaded via |uploaded|."""
    return instance_id in self.uploaded

  def create_upload_session(self, package_name, instance_id, caller):
    """Returns canned (upload URL, session id) pair."""
    return 'http://upload_url', 'upload_session_id'
| |
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
"""Unittests for tvdb_api
"""
import sys
import datetime
import unittest
sys.path.append("..")
import tvdb_api
import tvdb_ui
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
class test_tvdb_basic(unittest.TestCase):
    """Basic show/season/episode lookups via a shared cached Tvdb() instance."""

    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        # Create the API instance once and share it across all test methods
        # to avoid re-fetching the same data for every test.
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache=True, banners=False)

    def test_different_case(self):
        """Checks the auto-correction of show names is working.
        It should correct the weirdly capitalised 'sCruBs' to 'Scrubs'
        """
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
        self.assertEqual(self.t['sCruBs']['seriesname'], 'Scrubs')

    def test_spaces(self):
        """Checks shownames with spaces
        """
        self.assertEqual(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
        self.assertEqual(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')

    def test_numeric(self):
        """Checks numeric show names
        """
        self.assertEqual(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')
        self.assertEqual(self.t['24']['seriesname'], '24')

    def test_show_iter(self):
        """Iterating over a show returns each seasons
        """
        self.assertEqual(
            len(
                [season for season in self.t['Life on Mars']]
            ),
            3
        )

    def test_season_iter(self):
        """Iterating over a show returns episodes
        """
        self.assertEqual(
            len(
                [episode for episode in self.t['Life on Mars'][1]]
            ),
            8
        )

    def test_get_episode_overview(self):
        """Checks episode overview is retrieved correctly.
        """
        self.assertEqual(
            self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(
                'When a new copy of Doral, a Cylon who had been previously'),
            True
        )
class test_tvdb_errors(unittest.TestCase):
    """Checks the tvdb_* exception hierarchy is raised as documented."""

    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        # Create the API instance once and share it across all test methods.
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache=True, banners=False)

    def test_seasonnotfound(self):
        """Checks exception is thrown when season doesn't exist.
        """
        self.assertRaises(tvdb_seasonnotfound, lambda:self.t['CNNNN'][10][1])

    def test_shownotfound(self):
        """Checks exception is thrown when show doesn't exist.
        """
        self.assertRaises(tvdb_shownotfound, lambda:self.t['the fake show thingy'])

    def test_episodenotfound(self):
        """Checks exception is raised for non-existent episode
        """
        self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])

    def test_attributenamenotfound(self):
        """Checks exception is thrown for if an attribute isn't found.
        """
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])
class test_tvdb_search(unittest.TestCase):
    """Tests for the episode/season/show search helpers."""

    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        # Create the API instance once and share it across all test methods.
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache=True, banners=False)

    def test_search_len(self):
        """There should be only one result matching
        """
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)

    def test_search_checkname(self):
        """Checks you can get the episode name of a search result
        """
        self.assertEqual(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')
        self.assertEqual(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')

    def test_search_multiresults(self):
        """Checks search can return multiple results
        """
        # assertTrue gives a clearer failure than assertEqual(..., True).
        self.assertTrue(len(self.t['Scrubs'].search('my first')) >= 3)

    def test_search_no_params_error(self):
        """Checks not supplying search info raises TypeError"""
        self.assertRaises(
            TypeError,
            lambda: self.t['Scrubs'].search()
        )

    def test_search_season(self):
        """Checks the searching of a single season"""
        self.assertEqual(
            len(self.t['Scrubs'][1].search("First")),
            3
        )

    def test_search_show(self):
        """Checks the searching of an entire show"""
        self.assertEqual(
            len(self.t['CNNNN'].search('CNNNN', key='episodename')),
            2
        )

    def test_aired_on(self):
        """Tests airedOn show method"""
        sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2))
        self.assertEqual(len(sr), 1)
        self.assertEqual(sr[0]['episodename'], u'My First Day')
class test_tvdb_data(unittest.TestCase):
    """Checks raw show-level data values come back as expected."""

    # Cached Tvdb() instance shared across the tests in this class.
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_episode_data(self):
        """Check the firstaired value is retrieved
        """
        first_aired = self.t['lost']['firstaired']
        self.assertEquals(first_aired, '2004-09-22')
class test_tvdb_misc(unittest.TestCase):
    """Miscellaneous checks: repr() output and valid_languages coverage."""

    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_repr_show(self):
        """Check repr() of Show
        """
        # Docstring fixed: this asserts on the Show repr, not Season
        # (the old text was copy-pasted from test_repr_season).
        self.assertEquals(
            repr(self.t['CNNNN']),
            "<Show Chaser Non-Stop News Network (CNNNN) (containing 2 seasons)>"
        )

    def test_repr_season(self):
        """Check repr() of Season
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1]),
            "<Season instance (containing 9 episodes)>"
        )

    def test_repr_episode(self):
        """Check repr() of Episode
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1][1]),
            "<Episode 01x01 - Terror Alert>"
        )

    def test_have_all_languages(self):
        """Check valid_languages is up-to-date (compared to languages.xml)
        """
        et = self.t._getetsrc(
            "http://www.thetvdb.com/api/%s/languages.xml" % (
                self.t.config['apikey']
            )
        )
        languages = [x.find("abbreviation").text for x in et.findall("Language")]
        self.assertEquals(
            sorted(languages),
            sorted(self.t.config['valid_languages'])
        )
class test_tvdb_languages(unittest.TestCase):
    """Checks that the language= argument controls the translation returned."""

    def test_episode_name_french(self):
        """Check episode data is in French (language="fr")
        """
        t = tvdb_api.Tvdb(cache = True, language = "fr")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mon premier jour"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u"J.D. est un jeune m\xe9decin qui d\xe9bute"
            )
        )

    def test_episode_name_spanish(self):
        """Check episode data is in Spanish (language="es")
        """
        t = tvdb_api.Tvdb(cache = True, language = "es")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mi Primer Dia"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u'Scrubs es una divertida comedia'
            )
        )

    def test_multilanguage_selection(self):
        """Check selected language is used
        """
        # Custom UI subclasses force selection of the series entry in a
        # specific language from the multi-language search results.
        class SelectEnglishUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "en"][0]

        class SelectItalianUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "it"][0]

        t_en = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectEnglishUI,
            language = "en")
        t_it = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectItalianUI,
            language = "it")

        # Same show, same episode: the name must come back in the language
        # chosen by the UI/language combination.
        self.assertEquals(
            t_en['dexter'][1][2]['episodename'], "Crocodile"
        )
        self.assertEquals(
            t_it['dexter'][1][2]['episodename'], "Lacrime di coccodrillo"
        )
class test_tvdb_unicode(unittest.TestCase):
    """Checks non-ASCII (unicode) show names work for search and data access."""

    def test_search_in_chinese(self):
        """Check searching for show with language=zh returns Chinese seriesname
        """
        t = tvdb_api.Tvdb(cache = True, language = "zh")
        # The escapes are the Vietnamese-script title of the show that the
        # next test resolves to 'Virtues Of Harmony II' in English.
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )
        self.assertEquals(
            show['seriesname'],
            u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'
        )

    def test_search_in_all_languages(self):
        """Check search_all_languages returns Chinese show, with language=en
        """
        t = tvdb_api.Tvdb(cache = True, search_all_languages = True, language="en")
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )
        self.assertEquals(
            show['seriesname'],
            u'Virtues Of Harmony II'
        )
class test_tvdb_banners(unittest.TestCase):
    """Checks banner data when the API is created with banners=True."""

    # Cached Tvdb() instance (banner data enabled), built once per class.
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = True)

    def test_have_banners(self):
        """Check banners at least one banner is found
        """
        banners = self.t['scrubs']['_banners']
        self.assertEquals(len(banners) > 0, True)

    def test_banner_url(self):
        """Checks banner URLs start with http://
        """
        # _banners is nested: banner type -> resolution -> banner id -> info.
        for banner_type, by_resolution in self.t['scrubs']['_banners'].items():
            for resolution, by_id in by_resolution.items():
                for banner_id, info in by_id.items():
                    self.assertEquals(
                        info['_bannerpath'].startswith("http://"),
                        True
                    )

    def test_episode_image(self):
        """Checks episode 'filename' image is fully qualified URL
        """
        episode_image = self.t['scrubs'][1][1]['filename']
        self.assertEquals(episode_image.startswith("http://"), True)

    def test_show_artwork(self):
        """Checks various image URLs within season data are fully qualified
        """
        for artwork_key in ['banner', 'fanart', 'poster']:
            artwork_url = self.t['scrubs'][artwork_key]
            self.assertEquals(artwork_url.startswith("http://"), True)
class test_tvdb_actors(unittest.TestCase):
    """Checks actor data when the API is created with actors=True."""

    # Cached Tvdb() instance (actor data enabled), built once per class.
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)

    def test_actors_is_correct_datatype(self):
        """Check show/_actors key exists and is correct type"""
        actors = self.t['scrubs']['_actors']
        self.assertTrue(isinstance(actors, tvdb_api.Actors))

    def test_actors_has_actor(self):
        """Check show has at least one Actor
        """
        first_actor = self.t['scrubs']['_actors'][0]
        self.assertTrue(isinstance(first_actor, tvdb_api.Actor))

    def test_actor_has_name(self):
        """Check first actor has a name"""
        first_actor = self.t['scrubs']['_actors'][0]
        self.assertEquals(first_actor['name'], "Zach Braff")

    def test_actor_image_corrected(self):
        """Check image URL is fully qualified
        """
        for actor in self.t['scrubs']['_actors']:
            # Actor's image can be None, it displays as the placeholder
            # image on thetvdb.com
            if actor['image'] is None:
                continue
            self.assertTrue(actor['image'].startswith("http://"))
class test_tvdb_doctest(unittest.TestCase):
    """Runs the doctest examples embedded in the tvdb_api docstrings."""

    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_doctest(self):
        """Check docstring examples works"""
        import doctest
        # Bug fix: doctest.testmod only *prints* failures; the previous code
        # ignored its return value, so broken examples could never fail this
        # test. Assert on the failure count instead.
        results = doctest.testmod(tvdb_api)
        self.assertEquals(results.failed, 0)
#end test_tvdb
if __name__ == '__main__':
    # Verbose runner: print each test name as it executes.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
DEFAULT_GROUP_NAME = "DEFAULT"
# The scheduler has options in several groups
METRICS_GROUP_NAME = "metrics"
TRUSTED_GROUP_NAME = "trusted_computing"
UPGRADE_GROUP_NAME = "upgrade_levels"
host_subset_size_opt = cfg.IntOpt("scheduler_host_subset_size",
default=1,
help="""
New instances will be scheduled on a host chosen randomly from a subset of the
N best hosts, where N is the value set by this option. Valid values are 1 or
greater. Any value less than one will be treated as 1.
Setting this to a value greater than 1 will reduce the chance that multiple
scheduler processes handling similar requests will select the same host,
creating a potential race condition. By selecting a host randomly from the N
hosts that best fit the request, the chance of a conflict is reduced. However,
the higher you set this value, the less optimal the chosen host may be for a
given request.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
bm_default_filter_opt = cfg.ListOpt("baremetal_scheduler_default_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ExactRamFilter",
"ExactDiskFilter",
"ExactCoreFilter",
],
help="""
This option specifies the filters used for filtering baremetal hosts. The value
should be a list of strings, with each string being the name of a filter class
to be used. When used, they will be applied in order, so place your most
restrictive filters first to make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
If the 'scheduler_use_baremetal_filters' option is False, this option has
no effect.
""")
use_bm_filters_opt = cfg.BoolOpt("scheduler_use_baremetal_filters",
default=False,
help="""
Set this to True to tell the nova scheduler that it should use the filters
specified in the 'baremetal_scheduler_default_filters' option. If you are not
scheduling baremetal nodes, leave this at the default setting of False.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
If this option is set to True, then the filters specified in the
'baremetal_scheduler_default_filters' are used instead of the filters
specified in 'scheduler_default_filters'.
""")
host_mgr_avail_filt_opt = cfg.MultiStrOpt("scheduler_available_filters",
default=["nova.scheduler.filters.all_filters"],
help="""
This is an unordered list of the filter classes the Nova scheduler may apply.
Only the filters specified in the 'scheduler_default_filters' option will be
used, but any filter appearing in that option must also be included in this
list.
By default, this is set to all filters that are included with Nova. If you wish
to change this, replace this with a list of strings, where each element is the
path to a filter.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
scheduler_default_filters
""")
host_mgr_default_filt_opt = cfg.ListOpt("scheduler_default_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"RamFilter",
"DiskFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ServerGroupAntiAffinityFilter",
"ServerGroupAffinityFilter",
],
help="""
This option is the list of filter class names that will be used for filtering
hosts. The use of 'default' in the name of this option implies that other
filters may sometimes be used, but that is not the case. These filters will be
applied in the order they are listed, so place your most restrictive filters
first to make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
All of the filters in this option *must* be present in the
'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
exception will be raised.
""")
host_mgr_sched_wgt_cls_opt = cfg.ListOpt("scheduler_weight_classes",
default=["nova.scheduler.weights.all_weighers"],
help="""
This is a list of weigher class names. Only hosts which pass the filters are
weighed. The weight for any host starts at 0, and the weighers order these
hosts by adding to or subtracting from the weight assigned by the previous
weigher. Weights may become negative.
An instance will be scheduled to one of the N most-weighted hosts, where N is
'scheduler_host_subset_size'.
By default, this is set to all weighers that are included with Nova. If you
wish to change this, replace this with a list of strings, where each element is
the path to a weigher.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
host_mgr_tracks_inst_chg_opt = cfg.BoolOpt("scheduler_tracks_instance_changes",
default=True,
help="""
The scheduler may need information about the instances on a host in order to
evaluate its filters and weighers. The most common need for this information is
for the (anti-)affinity filters, which need to choose a host based on the
instances already running on a host.
If the configured filters and weighers do not need this information, disabling
this option will improve performance. It may also be disabled when the tracking
overhead proves too heavy, although this will cause classes requiring host
usage data to query the database on each request instead.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
rpc_sched_topic_opt = cfg.StrOpt("scheduler_topic",
default="scheduler",
help="""
This is the message queue topic that the scheduler 'listens' on. It is used
when the scheduler service is started up to configure the queue, and whenever
an RPC call to the scheduler is made. There is almost never any reason to ever
change this value.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
scheduler_json_config_location_opt = cfg.StrOpt(
"scheduler_json_config_location",
default="",
help="""
The absolute path to the scheduler configuration JSON file, if any. This file
location is monitored by the scheduler for changes and reloads it if needed. It
is converted from JSON to a Python data structure, and passed into the
filtering and weighing functions of the scheduler, which can use it for dynamic
configuration.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
sched_driver_host_mgr_opt = cfg.StrOpt("scheduler_host_manager",
default="host_manager",
help="""
The scheduler host manager to use, which manages the in-memory picture of the
hosts that the scheduler uses.
The option value should be chosen from one of the entrypoints under the
namespace 'nova.scheduler.host_manager' of file 'setup.cfg'. For example,
'host_manager' is the default setting. Aside from the default, the only other
option as of the Mitaka release is 'ironic_host_manager', which should be used
if you're using Ironic to provision bare-metal instances.
This option also supports a full class path style, for example
"nova.scheduler.host_manager.HostManager", but note this support is deprecated
and will be dropped in the N release.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
driver_opt = cfg.StrOpt("scheduler_driver",
default="filter_scheduler",
help="""
The class of the driver used by the scheduler. This should be chosen from one
of the entrypoints under the namespace 'nova.scheduler.driver' of file
'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is
used.
This option also supports deprecated full Python path to the class to be used.
For example, "nova.scheduler.filter_scheduler.FilterScheduler". But note: this
support will be dropped in the N Release.
Other options are:
* 'caching_scheduler' which aggressively caches the system state for better
individual scheduler performance at the risk of more retries when running
multiple schedulers.
* 'chance_scheduler' which simply picks a host at random.
* 'fake_scheduler' which is used for testing.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
driver_period_opt = cfg.IntOpt("scheduler_driver_task_period",
default=60,
help="""
This value controls how often (in seconds) to run periodic tasks in the
scheduler. The specific tasks that are run for each period are determined by
the particular scheduler being used.
If this is larger than the nova-service 'service_down_time' setting, Nova may
report the scheduler service as down. This is because the scheduler driver is
responsible for sending a heartbeat and it will only do that as often as this
option allows. As each scheduler can work a little differently than the others,
be sure to test this with your selected scheduler.
* Services that use this:
``nova-scheduler``
* Related options:
``nova-service service_down_time``
""")
isolated_img_opt = cfg.ListOpt("isolated_images",
default=[],
help="""
If there is a need to restrict some images to only run on certain designated
hosts, list those image UUIDs here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
scheduler/isolated_hosts
scheduler/restrict_isolated_hosts_to_isolated_images
""")
isolated_host_opt = cfg.ListOpt("isolated_hosts",
default=[],
help="""
If there is a need to restrict some images to only run on certain designated
hosts, list those host names here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
scheduler/isolated_images
scheduler/restrict_isolated_hosts_to_isolated_images
""")
restrict_iso_host_img_opt = cfg.BoolOpt(
"restrict_isolated_hosts_to_isolated_images",
default=True,
help="""
This setting determines if the scheduler's isolated_hosts filter will allow
non-isolated images on a host designated as an isolated host. When set to True
(the default), non-isolated images will not be allowed to be built on isolated
hosts. When False, non-isolated images can be built on both isolated and
non-isolated hosts alike.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
then, this option doesn't affect the behavior of requests for isolated images,
which will *always* be restricted to isolated hosts.
* Services that use this:
``nova-scheduler``
* Related options:
scheduler/isolated_images
scheduler/isolated_hosts
""")
# This option specifies an option group, so register separately
rpcapi_cap_opt = cfg.StrOpt("scheduler",
help="""
Sets a version cap (limit) for messages sent to scheduler services. In the
situation where there were multiple scheduler services running, and they were
not being upgraded together, you would set this to the lowest deployed version
to guarantee that other services never send messages that any of your running
schedulers cannot understand.
This is rarely needed in practice as most deployments run a single scheduler.
It exists mainly for design compatibility with the other services, such as
compute, which are routinely upgraded in a rolling fashion.
* Services that use this:
``nova-compute, nova-conductor``
* Related options:
None
""")
# These opts are registered as a separate OptGroup
trusted_opts = [
cfg.StrOpt("attestation_server",
help="""
The host to use as the attestation server.
Cloud computing pools can involve thousands of compute nodes located at
different geographical locations, making it difficult for cloud providers to
identify a node's trustworthiness. When using the Trusted filter, users can
request that their VMs only be placed on nodes that have been verified by the
attestation server specified in this option.
The value is a string, and can be either an IP address or FQDN.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server_ca_file
attestation_port
attestation_api_url
attestation_auth_blob
attestation_auth_timeout
attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_server_ca_file",
help="""
The absolute path to the certificate to use for authentication when connecting
to the attestation server. See the `attestation_server` help text for more
information about host verification.
The value is a string, and must point to a file that is readable by the
scheduler.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_port
attestation_api_url
attestation_auth_blob
attestation_auth_timeout
attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_port",
default="8443",
help="""
The port to use when connecting to the attestation server. See the
`attestation_server` help text for more information about host verification.
Valid values are strings, not integers, but must be digits only.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_server_ca_file
attestation_api_url
attestation_auth_blob
attestation_auth_timeout
attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_api_url",
default="/OpenAttestationWebServices/V1.0",
help="""
The URL on the attestation server to use. See the `attestation_server` help
text for more information about host verification.
This value must be just that path portion of the full URL, as it will be joined
to the host specified in the attestation_server option.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_server_ca_file
attestation_port
attestation_auth_blob
attestation_auth_timeout
attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_auth_blob",
help="""
Attestation servers require a specific blob that is used to authenticate. The
content and format of the blob are determined by the particular attestation
server being used. There is no default value; you must supply the value as
specified by your attestation service. See the `attestation_server` help text
for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_server_ca_file
attestation_port
attestation_api_url
attestation_auth_timeout
attestation_insecure_ssl
"""),
cfg.IntOpt("attestation_auth_timeout",
default=60,
help="""
This value controls how long a successful attestation is cached. Once this
period has elapsed, a new attestation request will be made. See the
`attestation_server` help text for more information about host verification.
The value is in seconds. Valid values must be positive integers for any
caching; setting this to zero or a negative value will result in calls to the
attestation_server for every request, which may impact performance.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_server_ca_file
attestation_port
attestation_api_url
attestation_auth_blob
attestation_insecure_ssl
"""),
cfg.BoolOpt("attestation_insecure_ssl",
default=False,
help="""
When set to True, the SSL certificate verification is skipped for the
attestation service. See the `attestation_server` help text for more
information about host verification.
Valid values are True or False. The default is False.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
attestation_server
attestation_server_ca_file
attestation_port
attestation_api_url
attestation_auth_blob
attestation_auth_timeout
"""),
]
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
help="""
This setting caps the number of instances on a host that can be actively
performing IO (in a build, resize, snapshot, migrate, rescue, or unshelve task
state) before that host becomes ineligible to build new instances.
Valid values are positive integers: 1 or greater.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops_filter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
agg_img_prop_iso_namespace_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_namespace",
help="""
Images and hosts can be configured so that certain images can only be scheduled
to hosts in a particular aggregate. This is done with metadata values set on
the host aggregate that are identified by beginning with the value of this
option. If the host is part of an aggregate with such a metadata key, the image
in the request spec must have the value of that metadata in its properties in
order for the scheduler to consider the host as acceptable.
Valid values are strings.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter is
enabled.
* Services that use this:
``nova-scheduler``
* Related options:
aggregate_image_properties_isolation_separator
""")
agg_img_prop_iso_separator_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_separator",
default=".",
help="""
When using the aggregate_image_properties_isolation filter, the relevant
metadata keys are prefixed with the namespace defined in the
aggregate_image_properties_isolation_namespace configuration option plus a
separator. This option defines the separator to be used. It defaults to a
period ('.').
Valid values are strings.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter is
enabled.
* Services that use this:
``nova-scheduler``
* Related options:
aggregate_image_properties_isolation_namespace
""")
max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
default=50,
help="""
If you need to limit the number of instances on any given host, set this option
to the maximum number of instances you want to allow. The num_instances_filter
will reject any host that has at least as many instances as this option's
value.
Valid values are positive integers; setting it to zero will cause all hosts to
be rejected if the num_instances_filter is active.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'num_instances_filter' filter is enabled.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
ram_weight_mult_opt = cfg.FloatOpt("ram_weight_multiplier",
default=1.0,
help="""
This option determines how hosts with more or less available RAM are weighed. A
positive value will result in the scheduler preferring hosts with more
available RAM, and a negative number will result in the scheduler preferring
hosts with less available RAM. Another way to look at it is that positive
values for this option will tend to spread instances across many hosts, while
negative values will tend to fill up (stack) hosts as much as possible before
scheduling to a less-used host. The absolute value, whether positive or
negative, controls how strong the RAM weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'ram' weigher is enabled.
Valid values are numeric, either integer or float.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
# Help text expanded to follow the detailed house style every sibling
# scheduler option in this module uses (cf. ram_weight_multiplier above).
disk_weight_mult_opt = cfg.FloatOpt("disk_weight_multiplier",
        default=1.0,
        help="""
This option determines how hosts with more or less available disk space are
weighed. A positive value will result in the scheduler preferring hosts with
more available disk space, and a negative number will result in the scheduler
preferring hosts with less available disk space. Another way to look at it is
that positive values for this option will tend to spread instances across many
hosts, while negative values will tend to fill up (stack) hosts as much as
possible before scheduling to a less-used host. The absolute value, whether
positive or negative, controls how strong the disk weigher is relative to other
weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'disk' weigher is enabled.
Valid values are numeric, either integer or float.
* Services that use this:
    ``nova-scheduler``
* Related options:
    None
""")
io_ops_weight_mult_opt = cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
help="""
This option determines how hosts with differing workloads are weighed. Negative
values, such as the default, will result in the scheduler preferring hosts with
lighter workloads whereas positive values will prefer hosts with heavier
workloads. Another way to look at it is that positive values for this option
will tend to schedule instances onto hosts that are already busy, while
negative values will tend to distribute the workload across more hosts. The
absolute value, whether positive or negative, controls how strong the io_ops
weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops' weigher is enabled.
Valid values are numeric, either integer or float.
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
# These opts are registered as a separate OptGroup
metrics_weight_opts = [
cfg.FloatOpt("weight_multiplier",
default=1.0,
help="""
When using metrics to weight the suitability of a host, you can use this option
to change how the calculated weight influences the weight assigned to a host as
follows:
* Greater than 1.0: increases the effect of the metric on overall weight.
* Equal to 1.0: No change to the calculated weight.
* Less than 1.0, greater than 0: reduces the effect of the metric on
overall weight.
* 0: The metric value is ignored, and the value of the
'weight_of_unavailable' option is returned instead.
* Greater than -1.0, less than 0: the effect is reduced and reversed.
* -1.0: the effect is reversed
* Less than -1.0: the effect is increased proportionally and reversed.
Valid values are numeric, either integer or float.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
weight_of_unavailable
"""),
cfg.ListOpt("weight_setting",
default=[],
help="""
This setting specifies the metrics to be weighed and the relative ratios for
each metric. This should be a single string value, consisting of a series of
one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
of the metric to be weighed, and 'ratio' is the relative weight for that
metric.
Note that if the ratio is set to 0, the metric value is ignored, and instead
the weight will be set to the value of the 'weight_of_unavailable' option.
As an example, let's consider the case where this option is set to:
``name1=1.0, name2=-1.3``
The final weight will be:
``(name1.value * 1.0) + (name2.value * -1.3)``
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
weight_of_unavailable
"""),
cfg.BoolOpt("required",
default=True,
help="""
This setting determines how any unavailable metrics are treated. If this option
is set to True, any hosts for which a metric is unavailable will raise an
exception, so it is recommended to also use the MetricFilter to filter out
those hosts before weighing.
When this option is False, any metric being unavailable for a host will set the
host weight to 'weight_of_unavailable'.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
weight_of_unavailable
"""),
cfg.FloatOpt("weight_of_unavailable",
default=float(-10000.0),
help="""
When any of the following conditions are met, this value will be used in place
of any actual metric value:
* One of the metrics named in 'weight_setting' is not available for a host,
and the value of 'required' is False.
* The ratio specified for a metric in 'weight_setting' is 0.
* The 'weight_multiplier' option is set to 0.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
* Services that use this:
``nova-scheduler``
* Related options:
weight_setting
required
weight_multiplier
"""),
]
scheduler_max_att_opt = cfg.IntOpt("scheduler_max_attempts",
default=3,
help="""
This is the maximum number of attempts that will be made to schedule an
instance before it is assumed that the failures aren't due to normal occasional
race conflicts, but rather some other problem. When this is reached a
MaxRetriesExceeded exception is raised, and the instance is set to an error
state.
Valid values are positive integers (1 or greater).
* Services that use this:
``nova-scheduler``
* Related options:
None
""")
# Weigher multipliers for the server-group soft-(anti-)affinity weighers.
# Both are registered in the DEFAULT group (see default_opts below).
soft_affinity_weight_opt = cfg.FloatOpt('soft_affinity_weight_multiplier',
    default=1.0,
    help='Multiplier used for weighing hosts '
         'for group soft-affinity. Only a '
         'positive value is meaningful. Negative '
         'means that the behavior will change to '
         'the opposite, which is soft-anti-affinity.')

soft_anti_affinity_weight_opt = cfg.FloatOpt(
    'soft_anti_affinity_weight_multiplier',
    default=1.0,
    help='Multiplier used for weighing hosts '
         'for group soft-anti-affinity. Only a '
         'positive value is meaningful. Negative '
         'means that the behavior will change to '
         'the opposite, which is soft-affinity.')
# Options registered in the DEFAULT group. Note that rpcapi_cap_opt,
# trusted_opts and metrics_weight_opts are deliberately absent here: they
# belong to their own option groups and are registered separately in
# register_opts().
default_opts = [host_subset_size_opt,
                bm_default_filter_opt,
                use_bm_filters_opt,
                host_mgr_avail_filt_opt,
                host_mgr_default_filt_opt,
                host_mgr_sched_wgt_cls_opt,
                host_mgr_tracks_inst_chg_opt,
                rpc_sched_topic_opt,
                sched_driver_host_mgr_opt,
                driver_opt,
                driver_period_opt,
                scheduler_json_config_location_opt,
                isolated_img_opt,
                isolated_host_opt,
                restrict_iso_host_img_opt,
                max_io_ops_per_host_opt,
                agg_img_prop_iso_namespace_opt,
                agg_img_prop_iso_separator_opt,
                max_instances_per_host_opt,
                ram_weight_mult_opt,
                disk_weight_mult_opt,
                io_ops_weight_mult_opt,
                scheduler_max_att_opt,
                soft_affinity_weight_opt,
                soft_anti_affinity_weight_opt,
               ]
def register_opts(conf):
    """Register every scheduler option on *conf*.

    Registers the DEFAULT-group options, the upgrade-levels RPC cap,
    the trusted-computing group (created here) with its options, and the
    metrics weigher options under their respective groups.
    """
    conf.register_opts(default_opts)
    conf.register_opt(rpcapi_cap_opt, UPGRADE_GROUP_NAME)
    # the trusted group must exist before its options can be registered
    trust_group = cfg.OptGroup(name=TRUSTED_GROUP_NAME,
                               title="Trust parameters")
    conf.register_group(trust_group)
    conf.register_opts(trusted_opts, group=trust_group)
    conf.register_opts(metrics_weight_opts, group=METRICS_GROUP_NAME)
def list_opts():
    """Return all option lists, keyed by the group they are registered in."""
    opts_by_group = {
        DEFAULT_GROUP_NAME: default_opts,
        UPGRADE_GROUP_NAME: [rpcapi_cap_opt],
        TRUSTED_GROUP_NAME: trusted_opts,
        METRICS_GROUP_NAME: metrics_weight_opts,
    }
    return opts_by_group
| |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Student Project.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import logging
import time
from google.appengine.ext import db
from django import forms
from django import http
from django.utils import simplejson
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.views import out_of_band
from soc.views.helper import decorators
from soc.views.helper import dynaform
from soc.views.helper import forms as forms_helper
from soc.views.helper import lists
from soc.views.helper import params as params_helper
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.helper import widgets
from soc.views.models import base
from soc.modules.gsoc.logic.models import student as student_logic
from soc.modules.gsoc.logic.models.mentor import logic as mentor_logic
from soc.modules.gsoc.logic.models.org_admin import logic as org_admin_logic
from soc.modules.gsoc.logic.models.organization import logic as org_logic
from soc.modules.gsoc.logic.models.program import logic as program_logic
from soc.modules.gsoc.logic.models.student_project import logic as \
project_logic
from soc.modules.gsoc.views.helper import access
from soc.modules.gsoc.views.models import organization as org_view
class View(base.View):
  """View methods for the Student Project model.
  """

  # User-facing messages shown in the evaluation-list columns on the
  # manage page.
  DEF_NO_RECORD_AVAILABLE_MSG = ugettext('No Record Available')
  DEF_VIEW_RECORD_MSG = ugettext('View Record')
  DEF_TAKE_SURVEY_MSG = ugettext('Take Survey')
def __init__(self, params=None):
    """Defines the fields and methods required for the base View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """

    # Access-control rules for each view exposed by this module.
    rights = access.GSoCChecker(params)
    rights['any_access'] = ['allow']
    rights['create'] = ['checkIsDeveloper']
    rights['edit'] = ['checkIsDeveloper']
    rights['delete'] = ['checkIsDeveloper']
    rights['show'] = ['allow']
    rights['list'] = ['checkIsDeveloper']
    # org admins may manage projects that are in one of these states
    rights['manage'] = [('checkHasRoleForScope',
                         [org_admin_logic, ['active', 'inactive']]),
        ('checkStudentProjectHasStatus', [['accepted', 'failed', 'completed',
                                           'withdrawn']])]
    # org admins or mentors may view the manage overview
    rights['manage_overview'] = [
        ('checkHasAny', [
            [('checkHasRoleForScope', [org_admin_logic,
                                       ['active', 'inactive']]),
             ('checkHasRoleForScope', [mentor_logic,
                                       ['active', 'inactive']])
            ]])]
    # TODO: lack of better name here!
    rights['st_edit'] = [
        'checkCanEditStudentProjectAsStudent',
        ('checkStudentProjectHasStatus',
         [['accepted', 'completed']])
    ]
    rights['overview'] = [('checkIsHostForProgram', [program_logic])]

    # Parameters consumed by the base View machinery.
    new_params = {}
    new_params['logic'] = project_logic
    new_params['rights'] = rights
    new_params['name'] = 'Student Project'
    new_params['url_name'] = 'gsoc/student_project'
    new_params['module_package'] = 'soc.modules.gsoc.views.models'
    new_params['sidebar_grouping'] = 'Students'

    new_params['scope_view'] = org_view
    new_params['scope_redirect'] = redirects.getCreateRedirect
    new_params['no_create_with_key_fields'] = True

    # fields that are filled in programmatically, not via the edit form
    new_params['extra_dynaexclude'] = ['program', 'status', 'link_id',
                                       'mentor', 'additional_mentors',
                                       'student', 'passed_evaluations',
                                       'failed_evaluations']

    # extra fields and cleaners for the create form
    new_params['create_extra_dynaproperties'] = {
        'scope_path': forms.CharField(widget=forms.HiddenInput,
                                      required=True),
        'public_info': forms.fields.CharField(required=True,
            widget=widgets.FullTinyMCE(attrs={'rows': 25, 'cols': 100})),
        'student_id': forms.CharField(label='Student Link ID',
                                      required=True),
        'mentor_id': forms.CharField(label='Mentor Link ID',
                                     required=True),
        'clean_abstract': cleaning.clean_content_length('abstract'),
        'clean_public_info': cleaning.clean_html_content('public_info'),
        'clean_student': cleaning.clean_link_id('student'),
        'clean_mentor': cleaning.clean_link_id('mentor'),
        'clean_additional_info': cleaning.clean_url('additional_info'),
        'clean_feed_url': cleaning.clean_feed_url('feed_url'),
        'clean': cleaning.validate_student_project('scope_path',
            'mentor_id', 'student_id')
    }

    new_params['edit_extra_dynaproperties'] = {
        'link_id': forms.CharField(widget=forms.HiddenInput),
    }

    # URL patterns for the custom views defined in this module
    patterns = [
        (r'^%(url_name)s/(?P<access_type>manage_overview)/%(scope)s$',
         'soc.modules.gsoc.views.models.%(module_name)s.manage_overview',
         'Overview of %(name_plural)s to Manage for'),
        (r'^%(url_name)s/(?P<access_type>manage)/%(key_fields)s$',
         'soc.modules.gsoc.views.models.%(module_name)s.manage',
         'Manage %(name)s'),
        (r'^%(url_name)s/(?P<access_type>st_edit)/%(key_fields)s$',
         'soc.modules.gsoc.views.models.%(module_name)s.st_edit',
         'Edit my %(name)s'),
        (r'^%(url_name)s/(?P<access_type>overview)/(?P<scope_path>%(ulnp)s)/%(lnp)s$',
         'soc.modules.gsoc.views.models.%(module_name)s.overview',
         'Overview of all %(name_plural)s for'),
    ]
    new_params['extra_django_patterns'] = patterns

    new_params['edit_template'] = 'soc/student_project/edit.html'
    new_params['manage_template'] = 'soc/student_project/manage.html'

    # public list configuration
    new_params['public_field_prefetch'] = ['mentor', 'student', 'scope']
    new_params['public_field_extra'] = lambda entity: {
        'student': entity.student.name(),
        'mentor': entity.mentor.name(),
        'org': entity.scope.name,
    }
    new_params['public_field_keys'] = ['student', 'title', 'mentor',
                                       'org', 'status']
    new_params['public_field_names'] = ['Student', 'Title', 'Mentor',
                                        'Organization', 'Status']

    # org-home list configuration; the mentor column joins the primary
    # mentor with all additional mentors
    new_params['org_home_field_prefetch'] = ['mentor', 'student']
    new_params['org_home_field_extra'] = lambda entity: {
        'student': entity.student.name(),
        'mentor': ', '.join(
            mentor.name() for mentor in
            [entity.mentor] + db.get(entity.additional_mentors))
    }
    new_params['org_home_field_keys'] = ['student', 'title', 'mentor',
                                         'status']
    new_params['org_home_field_names'] = ['Student', 'Title',
                                          'Mentor', 'Status']

    # define the list redirect action to show the notification
    new_params['public_row_extra'] = new_params[
        'org_home_row_extra'] = lambda entity: {
        'link': redirects.getPublicRedirect(entity, new_params)
    }
    new_params['org_home_row_action'] = {
        'type': 'redirect_custom',
        'parameters': dict(new_window=False),
    }

    # admin list configuration; student_id is present but hidden
    new_params['admin_field_prefetch'] = ['mentor', 'student', 'scope']
    new_params['admin_field_extra'] = lambda entity: {
        'student': entity.student.name(),
        'mentor': entity.mentor.name(),
        'student_id': entity.student.link_id
    }
    new_params['admin_field_keys'] = ['student', 'title', 'mentor', 'status',
                                      'student_id']
    new_params['admin_field_names'] = ['Student', 'Title', 'Mentor', 'Status',
                                       'Student Link ID']
    new_params['admin_field_hidden'] = ['student_id']
    new_params['admin_conf_extra'] = {
        'multiselect': True,
    }
    # the two bulk-action buttons handled by overviewPost()
    new_params['admin_button_global'] = [
        {
            'bounds': [1,'all'],
            'id': 'withdraw',
            'caption': 'Withdraw Project',
            'type': 'post',
            'parameters': {
                'url': '',
                'keys': ['key'],
                'refresh': 'current',
            }
        },
        {
            'bounds': [1,'all'],
            'id': 'accept',
            'caption': 'Accept Project',
            'type': 'post',
            'parameters': {
                'url': '',
                'keys': ['key'],
                'refresh': 'current',
            }
        }]

    params = dicts.merge(params, new_params)
    super(View, self).__init__(params=params)

    # create the form that students will use to edit their projects
    dynaproperties = {
        'public_info': forms.fields.CharField(required=True,
            widget=widgets.FullTinyMCE(attrs={'rows': 25, 'cols': 100})),
        'clean_abstract': cleaning.clean_content_length('abstract'),
        'clean_public_info': cleaning.clean_html_content('public_info'),
        'clean_additional_info': cleaning.clean_url('additional_info'),
        'clean_feed_url': cleaning.clean_feed_url('feed_url'),
    }
    student_edit_form = dynaform.newDynaForm(
        dynabase = self._params['dynabase'],
        dynamodel = self._params['logic'].getModel(),
        dynaexclude = self._params['create_dynaexclude'],
        dynaproperties = dynaproperties,
    )
    self._params['student_edit_form'] = student_edit_form
def _editGet(self, request, entity, form):
    """See base.View._editGet().

    Pre-fills the hidden link_id field and the student/mentor link-id
    fields from the existing entity, then delegates to the base class.
    """
    initial_values = {
        'link_id': entity.link_id,
        'student_id': entity.student.link_id,
        'mentor_id': entity.mentor.link_id,
    }
    for field_name, value in initial_values.items():
        form.fields[field_name].initial = value
    return super(View, self)._editGet(request, entity, form)
def _editPost(self, request, entity, fields):
    """See base.View._editPost().

    Ensures a link_id is present (generating a time-based one for new
    projects), then resolves and stores the program, student and mentor
    entities referenced by the submitted fields.
    """
    if not entity:
        # new projects get a generated, time-based link_id
        fields['link_id'] = 't%i' % (int(time.time()*100))
    else:
        fields['link_id'] = entity.link_id

    # fill in the scope via call to super
    super(View, self)._editPost(request, entity, fields)

    # editing a project so set the program, student and mentor field
    if entity:
        organization = entity.scope
    else:
        organization = fields['scope']

    fields['program'] = organization.scope

    # look up the student within the program (renamed from `filter`,
    # which shadowed the builtin of the same name)
    query_fields = {'scope': fields['program'],
                    'link_id': fields['student_id']}
    fields['student'] = student_logic.logic.getForFields(query_fields,
                                                        unique=True)

    # look up the active mentor within the organization
    query_fields = {'scope': organization,
                    'link_id': fields['mentor_id'],
                    'status': 'active'}
    fields['mentor'] = mentor_logic.getForFields(query_fields, unique=True)
def _public(self, request, entity, context):
    """Adds the names of all additional mentors to the context.

    For params see base.View._public()
    """
    mentor_keys = entity.additional_mentors
    if mentor_keys:
        # resolve each stored key to its mentor entity and join the names
        names = [mentor_logic.getFromKeyName(key.id_or_name()).name()
                 for key in mentor_keys]
        context['additional_mentors'] = ', '.join(names)
    else:
        # note: an empty list (not an empty string) is stored here,
        # matching the original behavior
        context['additional_mentors'] = []
def getOverviewData(self, request, params, program):
    """Return data for withdraw.

    Only list index 0 is served; any other index yields an error
    response.
    """
    if lists.getListIndex(request) != 0:
        return lists.getErrorResponse(request, "idx not valid")

    query_fields = {'program': program}
    contents = lists.getListData(request, params, query_fields,
                                 visibility='admin')
    return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def overview(self, request, access_type,
             page_name=None, params=None, **kwargs):
    """View that allows Program Admins to see/control all StudentProjects.

    For args see base.View().public()
    """
    # resolve the program from the URL key fields (404s when absent)
    program_entity = program_logic.getFromKeyFieldsOr404(kwargs)
    if request.POST:
        # bulk-button presses (withdraw/accept) arrive as POSTs
        return self.overviewPost(request, params, program_entity)
    else: #request.GET
        return self.overviewGet(request, page_name, params, program_entity,
                                **kwargs)
def overviewPost(self, request, params, program):
    """Handles the POST request for the Program Admins overview page.

    Args:
      request: Django HTTPRequest object
      params: Params for this view
      program: GSoCProgram entity

    Returns:
      An empty 200 response; invalid button ids are logged and ignored.
    """
    project_logic = params['logic']

    post_dict = request.POST
    data = simplejson.loads(post_dict.get('data', '[]'))
    button_id = post_dict.get('button_id', '')

    # map each known button to the status it applies; anything else is
    # an invalid request
    status_by_button = {'withdraw': 'withdrawn', 'accept': 'accepted'}
    if button_id not in status_by_button:
        logging.warning('Invalid button ID found %s' %(button_id))
        return http.HttpResponse()

    project_keys = [selected['key'] for selected in data]

    # get all projects and prefetch the program field
    projects = project_logic.getFromKeyName(project_keys)
    project_logic.prefetchField('program', projects)

    # filter out all projects not belonging to the current program
    projects = [p for p in projects if p.program.key() == program.key()]

    # the new status is loop-invariant, so build the update dict once
    fields = {'status': status_by_button[button_id]}
    for p in projects:
        # update the project with the new status
        project_logic.updateEntityProperties(p, fields)

    # return a 200 response
    return http.HttpResponse()
def overviewGet(self, request, page_name, params, program_entity, **kwargs):
    """Handles the GET request for the Program Admins overview page.

    Args:
      request: Django HTTPRequest object
      page_name: Name for this page
      params: Params for this view
      program_entity: GSocProgram entity
    """
    page_name = '%s %s' %(page_name, program_entity.name)

    list_params = params.copy()
    # clicking a row takes the admin to the project's public page
    list_params['admin_row_extra'] = lambda entity: {
        'link': redirects.getPublicRedirect(entity, list_params)
    }
    list_params['list_description'] = ugettext(
        'An overview of all StudentProjects for %s. Click on an item to view '
        'the project, use the buttons on the list for withdrawing a project.'%
        (program_entity.name))

    if lists.isDataRequest(request):
        # asynchronous request for the list contents (JSON)
        return self.getOverviewData(request, list_params, program_entity)

    project_list = lists.getListGenerator(request, list_params, idx=0, visibility='admin')

    # fill contents with the list
    contents = [project_list]

    # call the _list method from base to display the list
    # NOTE(review): `params` (not `list_params`) is passed here, so the
    # customized description above only reaches the data request --
    # confirm whether this is intentional.
    return self._list(request, params, contents, page_name)
def _getManageData(self, request, gps_params, ps_params, entity):
    """Returns the JSONResponse for the Manage page.

    Args:
      request: HTTPRequest object
      gps_params: GradingProjectSurvey list params
      ps_params: ProjectSurvey list params
      entity: StudentProject entity
    """
    # list index 0 is the mentor-evaluation list, index 1 the student one
    params_by_idx = {0: gps_params, 1: ps_params}
    idx = lists.getListIndex(request)
    if idx not in params_by_idx:
        return lists.getErrorResponse(request, "idx not valid")
    params = params_by_idx[idx]

    # collect this project's survey records, keyed by their survey
    record_logic = params['logic'].getRecordLogic()
    record_entities = record_logic.getForFields({'project': entity})
    record_dict = dict((i.survey.key(), i)
                       for i in record_entities if i.survey)

    # the row renderer uses this getter to find the record for a survey
    record_getter = lambda survey_entity: record_dict.get(survey_entity.key())

    fields = {'scope': entity.program,
              'prefix': 'gsoc_program'}
    contents = lists.getListData(request, params, fields,
                                 args=[record_getter])
    return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def manage(self, request, access_type,
           page_name=None, params=None, **kwargs):
    """View that allows Organization Admins to manage their Student Projects.

    For params see base.View().public()
    """
    # imported here to avoid circular imports at module load time
    import soc.logic.lists
    from soc.modules.gsoc.views.models.grading_project_survey import view as \
        grading_survey_view
    from soc.modules.gsoc.views.models.project_survey import view as \
        project_survey_view

    entity = self._logic.getFromKeyFieldsOr404(kwargs)
    template = params['manage_template']

    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = "%s '%s' from %s" % (page_name, entity.title,
                                                entity.student.name())
    context['entity'] = entity

    if project_logic.canChangeMentors(entity):
        # only accepted project can have their mentors managed
        self._enableMentorManagement(entity, params, context)

    # list all surveys for this Project's Program
    gps_params = grading_survey_view.getParams().copy()
    gps_params['list_description'] = \
        'List of all Mentor Evaluations for this Project'
    gps_params['public_row_extra'] = lambda entity, *args: {}
    gps_params['public_row_action'] = {}
    gps_params['public_field_keys'] = [
        "title", "taken_by", "taken_on", "record_url", "take_url"
    ]
    gps_params['public_field_names'] = [
        "Title", "Taken by", "Taken on", "View", "(Re) Take",
    ]

    no_record = self.DEF_NO_RECORD_AVAILABLE_MSG
    # TODO(SRabbelier): use buttons instead
    project_entity = entity
    # builds the per-row extra fields; `re` maps a survey to its record
    # (or None), see _getManageData
    getExtra = lambda params: lambda entity, re: {
        "taken_by": no_record if not re(entity) else re(entity).user.name,
        "taken_on": no_record if not re(entity) else str(re(entity).modified),
        "record_url": no_record if not re(entity) else lists.urlize(
            redirects.getViewSurveyRecordRedirect(re(entity), params),
            name=self.DEF_VIEW_RECORD_MSG),
        "take_url": lists.urlize(redirects.getTakeProjectSurveyRedirect(
            project_entity, {'survey': entity, 'params': params}),
            name=self.DEF_TAKE_SURVEY_MSG),
    }
    gps_params['public_field_extra'] = getExtra(gps_params)

    # get the ProjectSurvey list
    ps_params = project_survey_view.getParams().copy()
    ps_params['list_description'] = \
        'List of all Student Evaluations for this Project'
    ps_params['public_row_extra'] = lambda entity, *args: {}
    ps_params['public_row_action'] = {}
    ps_params['public_field_keys'] = gps_params['public_field_keys']
    ps_params['public_field_names'] = gps_params['public_field_names']
    # the take_url column is excluded from the student-evaluation list
    ps_params['public_field_ignore'] = ["take_url"]
    ps_params['public_field_extra'] = getExtra(ps_params)

    if lists.isDataRequest(request):
        return self._getManageData(request, gps_params, ps_params, entity)

    gps_list = lists.getListGenerator(request, gps_params, idx=0)
    ps_list = lists.getListGenerator(request, ps_params, idx=1)

    # store both lists in the content
    content = [gps_list, ps_list]
    context['evaluation_list'] = soc.logic.lists.Lists(content)

    if request.POST:
        return self.managePost(request, template, context, params, entity,
                               **kwargs)
    else: #request.GET
        return self.manageGet(request, template, context, params, entity,
                              **kwargs)
def _enableMentorManagement(self, entity, params, context):
    """Sets the data required to manage mentors for a StudentProject.

    Args:
      entity: StudentProject entity to manage
      params: params dict for the manage view
      context: context for the manage view
    """
    context['can_manage_mentors'] = True

    # get all mentors for this organization
    fields = {'scope': entity.scope,
              'status': 'active'}
    mentors = mentor_logic.getForFields(fields)

    # choices for the primary-mentor dropdown: (link_id, "Name (link_id)")
    choices = [(mentor.link_id,'%s (%s)' %(mentor.name(), mentor.link_id))
               for mentor in mentors]

    # create the form that org admins will use to reassign a mentor
    dynafields = [
        {'name': 'mentor_id',
         'base': forms.ChoiceField,
         'label': 'Primary Mentor',
         'required': True,
         'passthrough': ['required', 'choices', 'label'],
         'choices': choices,
        },]
    dynaproperties = params_helper.getDynaFields(dynafields)
    mentor_edit_form = dynaform.newDynaForm(
        dynabase = params['dynabase'],
        dynaproperties = dynaproperties,
    )
    params['mentor_edit_form'] = mentor_edit_form

    additional_mentors = entity.additional_mentors
    # we want to show the names of the additional mentors in the context
    # therefore they need to be resolved to entities first
    additional_mentors_context = []
    for mentor_key in additional_mentors:
        mentor_entity = mentor_logic.getFromKeyName(
            mentor_key.id_or_name())
        additional_mentors_context.append(mentor_entity)
    context['additional_mentors'] = additional_mentors_context

    # all mentors who are not already an additional mentor or
    # the primary mentor are allowed to become an additional mentor
    possible_additional_mentors = [m for m in mentors if
        (m.key() not in additional_mentors)
        and (m.key() != entity.mentor.key())]

    # create the information to be shown on the additional mentor form
    additional_mentor_choices = [
        (mentor.link_id,'%s (%s)' %(mentor.name(), mentor.link_id))
        for mentor in possible_additional_mentors]
    dynafields = [
        {'name': 'mentor_id',
         'base': forms.ChoiceField,
         'label': 'Co-Mentor',
         'required': True,
         'passthrough': ['required', 'choices', 'label'],
         'choices': additional_mentor_choices,
        },]
    dynaproperties = params_helper.getDynaFields(dynafields)
    additional_mentor_form = dynaform.newDynaForm(
        dynabase = params['dynabase'],
        dynaproperties = dynaproperties,
    )
    params['additional_mentor_form'] = additional_mentor_form
def manageGet(self, request, template, context, params, entity, **kwargs):
    """Handles the GET request for the project's manage page.

    Args:
      template: the template used for this view
      entity: the student project entity
      rest: see base.View.public()
    """
    get_dict = request.GET

    # a ?remove=<link_id> query removes an additional mentor, but only
    # while the project is accepted
    if 'remove' in get_dict and entity.status == 'accepted':
        # get the mentor to remove
        fields = {'link_id': get_dict['remove'],
                  'scope': entity.scope}
        mentor = mentor_logic.getForFields(fields, unique=True)

        additional_mentors = entity.additional_mentors
        # pylint: disable=E1103
        if additional_mentors and mentor.key() in additional_mentors:
            # remove the mentor from the additional mentors list
            additional_mentors.remove(mentor.key())
            fields = {'additional_mentors': additional_mentors}
            project_logic.updateEntityProperties(entity, fields)

        # redirect to the same page without GET arguments
        redirect = request.path
        return http.HttpResponseRedirect(redirect)

    if project_logic.canChangeMentors(entity):
        # populate forms with the current mentors set
        initial = {'mentor_id': entity.mentor.link_id}
        context['mentor_edit_form'] = params['mentor_edit_form'](initial=initial)
        context['additional_mentor_form'] = params['additional_mentor_form']()

    return responses.respond(request, template, context)
def managePost(self, request, template, context, params, entity, **kwargs):
    """Handles the POST request for the project's manage page.

    Args:
      template: the template used for this view
      entity: the student project entity
      rest: see base.View.public()
    """
    post_dict = request.POST

    # dispatch on which submit button was pressed, provided the project
    # is still in a state where its mentors may change
    if 'set_mentor' in post_dict and project_logic.canChangeMentors(entity):
        submitted = params['mentor_edit_form'](post_dict)
        return self._manageSetMentor(request, template, context, params,
                                     entity, submitted)

    if 'add_additional_mentor' in post_dict and \
            project_logic.canChangeMentors(entity):
        submitted = params['additional_mentor_form'](post_dict)
        return self._manageAddAdditionalMentor(request, template, context,
                                               params, entity, submitted)

    # unexpected error return the normal page
    logging.warning('Unexpected POST data found')
    return self.manageGet(request, template, context, params, entity)
def _manageSetMentor(self, request, template, context, params, entity, form):
    """Handles the POST request for changing a Projects's mentor.

    Args:
      template: the template used for this view
      entity: the student project entity
      form: instance of the form used to set the mentor
      rest: see base.View.public()
    """
    if not form.is_valid():
        context['mentor_edit_form'] = form
        # add an a fresh additional mentors form
        context['additional_mentor_form'] = params['additional_mentor_form']()
        return responses.respond(request, template, context)

    _, fields = forms_helper.collectCleanedFields(form)

    # get the mentor from the form
    fields = {'link_id': fields['mentor_id'],
              'scope': entity.scope,
              'status': 'active'}
    mentor = mentor_logic.getForFields(fields, unique=True)

    # update the project with the assigned mentor
    fields = {'mentor': mentor}

    additional_mentors = entity.additional_mentors
    # pylint: disable=E1103
    if additional_mentors and mentor.key() in additional_mentors:
        # remove the mentor that is now becoming the primary mentor
        additional_mentors.remove(mentor.key())
        fields['additional_mentors'] = additional_mentors

    # update the project with the new mentor and possible
    # new set of additional mentors
    project_logic.updateEntityProperties(entity, fields)

    # redirect to the same page
    redirect = request.path
    return http.HttpResponseRedirect(redirect)
def _manageAddAdditionalMentor(self, request, template,
                               context, params, entity, form):
    """Handles the POST request for changing a Projects's additional mentors.

    Args:
      template: the template used for this view
      entity: the student project entity
      form: instance of the form used to add an additional mentor
      rest: see base.View.public()
    """
    if not form.is_valid():
        # re-render with the invalid form plus a fresh primary-mentor form
        context['additional_mentor_form'] = form
        initial = {'mentor_id': entity.mentor.link_id}
        context['mentor_edit_form'] = params['mentor_edit_form'](initial=initial)
        return responses.respond(request, template, context)

    _, cleaned = forms_helper.collectCleanedFields(form)

    # resolve the selected mentor within this organization
    lookup = {'link_id': cleaned['mentor_id'],
              'scope': entity.scope,
              'status': 'active'}
    mentor = mentor_logic.getForFields(lookup, unique=True)

    # extend (or create) the list of additional mentor keys
    additional_mentors = entity.additional_mentors or []
    additional_mentors.append(mentor.key())
    project_logic.updateEntityProperties(
        entity, {'additional_mentors': additional_mentors})

    # redirect to the same page
    return http.HttpResponseRedirect(request.path)
def getManageOverviewData(self, request, mo_params, org_entity):
    """Returns the manageOverview data.
    """
    args = []
    fields = {}
    idx = lists.getListIndex(request)

    if idx == 0:
        # imported here to avoid circular imports at module load time;
        # NOTE(review): the survey_record grading_logic/project_logic
        # imports appear unused in this method (the lambdas in
        # manageOverview use their own imports) -- confirm.
        from soc.modules.gsoc.logic.models.survey import grading_logic as \
            grading_survey_logic
        from soc.modules.gsoc.logic.models.survey import project_logic as \
            project_survey_logic
        from soc.modules.gsoc.logic.models.survey_record import grading_logic
        from soc.modules.gsoc.logic.models.survey_record import project_logic

        program_entity = org_entity.scope
        fields = {'scope_path': program_entity.key().id_or_name()}

        # count the number of have been active ProjectSurveys
        project_surveys = project_survey_logic.getForFields(fields)
        project_survey_count = len(project_surveys)
        for project_survey in project_surveys:
            if not project_survey_logic.hasRecord(project_survey):
                project_survey_count = project_survey_count - 1

        # count the number of have been active GradingProjectSurveys
        grading_surveys = grading_survey_logic.getForFields(fields)
        grading_survey_count = len(grading_surveys)
        for grading_survey in grading_surveys:
            if not grading_survey_logic.hasRecord(grading_survey):
                grading_survey_count = grading_survey_count - 1

        fields = {'scope': org_entity}
        params = mo_params
        # extra args consumed by the public_field_extra lambda
        args = [project_surveys, project_survey_count,
                grading_surveys, grading_survey_count]
    else:
        return lists.getErrorResponse(request, 'idx not valid')

    contents = lists.getListData(request, params, fields, args=args)
    return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def manageOverview(self, request, access_type,
                   page_name=None, params=None, **kwargs):
    """View that allows Organization Admins to see an overview of
    their Organization's Student Projects.

    For params see base.View().public()
    """
    # imported here to avoid circular imports at module load time
    from soc.modules.gsoc.logic.models.survey import grading_logic as \
        grading_survey_logic
    from soc.modules.gsoc.logic.models.survey import project_logic as \
        project_survey_logic
    from soc.modules.gsoc.logic.models.survey_record import grading_logic
    from soc.modules.gsoc.logic.models.survey_record import project_logic

    # make sure the organization exists
    org_entity = org_logic.getFromKeyNameOr404(kwargs['scope_path'])
    page_name = '%s %s' % (page_name, org_entity.name)

    mo_params = params.copy()

    #list all active projects
    mo_params['list_description'] = ugettext(
        'List of all %s for %s, if you are an Org Admin you can click '
        'a project for more actions. Such as reassigning mentors or viewing '
        'results of the evaluations.' %(params['name_plural'], org_entity.name)
    )
    mo_params['public_field_names'] = params['public_field_names'] + [
        'Mentor evaluation', 'Student Evaluation']
    mo_params['public_field_keys'] = params['public_field_keys'] + [
        'mentor_evaluation', 'student_evaluation']

    fields = {'scope': org_entity,
              'status': ['active', 'inactive']}
    org_admin = org_admin_logic.getForFields(fields, unique=True)

    # Org Admins get a link to manage the project, others go to public page
    if org_admin:
        mo_params['public_row_extra'] = lambda entity, *args: {
            'link': redirects.getManageRedirect(entity, mo_params)
        }
    else:
        mo_params['public_row_extra'] = lambda entity, *args: {
            'link': redirects.getPublicRedirect(entity, mo_params)
        }

    mo_params['public_field_prefetch'] = ['student', 'mentor', 'scope']
    # evaluation columns render "<records for this project>/<survey count>"
    # using the counts computed in getManageOverviewData
    mo_params['public_field_extra'] = lambda entity, ps, psc, gs, gsc: {
        'org': entity.scope.name,
        'student': '%s (%s)' % (entity.student.name(), entity.student.email),
        'mentor': entity.mentor.name(),
        'mentor_evaluation': '%d/%d' % (
            grading_logic.getQueryForFields({'project': entity}).count(),
            gsc),
        'student_evaluation': '%d/%d' % (
            project_logic.getQueryForFields({'project': entity}).count(),
            psc),
    }

    if lists.isDataRequest(request):
        return self.getManageOverviewData(request, mo_params, org_entity)

    mo_list = lists.getListGenerator(request, mo_params, idx=0)
    contents = [mo_list]

    # call the _list method from base to display the list
    return self._list(request, mo_params, contents, page_name)
@decorators.merge_params
@decorators.check_access
def stEdit(self, request, access_type,
           page_name=None, params=None, **kwargs):
    """View that allows students to edit information about their project.

    For params see base.View().public()
    """
    try:
        entity = self._logic.getFromKeyFieldsOr404(kwargs)
    except out_of_band.Error, error:  # Python 2 except syntax; file targets 2.5
        return responses.errorResponse(
            error, request, template=params['error_public'])

    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = page_name

    # cancel should go to the public view
    params['cancel_redirect'] = redirects.getPublicRedirect(entity, params)

    if request.POST:
        return self.stEditPost(request, context, params, entity, **kwargs)
    else: #request.GET
        return self.stEditGet(request, context, params, entity, **kwargs)
def stEditGet(self, request, context, params, entity, **kwargs):
    """Handles the GET request for the student's edit page.

    Args:
      entity: the student project entity
      rest: see base.View.public()
    """
    # bind the form to the existing entity so current values are shown
    form_class = params['student_edit_form']
    bound_form = form_class(instance=entity)
    return self._constructResponse(request, entity, context, bound_form,
                                   params)
def stEditPost(self, request, context, params, entity, **kwargs):
    """Handles the POST request for the student's edit page.

    Args:
      entity: the student project entity
      rest: see base.View.public()
    """
    submitted_form = params['student_edit_form'](request.POST)

    if submitted_form.is_valid():
        # persist the cleaned values, then render a fresh GET view
        _, cleaned = forms_helper.collectCleanedFields(submitted_form)
        project_logic.updateEntityProperties(entity, cleaned)
        return self.stEditGet(request, context, params, entity, **kwargs)

    return self._constructResponse(request, entity, context, submitted_form,
                                   params)
# Instantiate the view once and expose each public entry point as a
# Django view function wrapped by the standard view decorator.
view = View()

admin = decorators.view(view.admin)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
list = decorators.view(view.list)  # NOTE: shadows the builtin `list` at module level
manage = decorators.view(view.manage)
manage_overview = decorators.view(view.manageOverview)
overview = decorators.view(view.overview)
public = decorators.view(view.public)
st_edit = decorators.view(view.stEdit)
export = decorators.view(view.export)
pick = decorators.view(view.pick)
| |
import re
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.http import int_to_base36
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from emailconfirmation.models import EmailAddress
from timezones.forms import TimeZoneField
from pinax.apps.account.models import Account, PasswordReset
from pinax.apps.account.signals import user_login_attempt, user_signed_up, user_sign_up_attempt
from pinax.apps.account.utils import perform_login, change_password
# Matches strings made solely of word characters (letters, digits, "_");
# used to validate usernames.
alnum_re = re.compile(r"^\w+$")

# @@@ might want to find way to prevent settings access globally here.
# Feature flags read once at import time from Django settings.
REQUIRED_EMAIL = getattr(settings, "ACCOUNT_REQUIRED_EMAIL", False)
EMAIL_VERIFICATION = getattr(settings, "ACCOUNT_EMAIL_VERIFICATION", False)
EMAIL_AUTHENTICATION = getattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION", False)
UNIQUE_EMAIL = getattr(settings, "ACCOUNT_UNIQUE_EMAIL", False)
class GroupForm(forms.Form):
    """Base form that optionally carries a group object.

    The `group` keyword argument is popped off before the regular
    forms.Form constructor runs, so subclasses may read self.group.
    """

    def __init__(self, *args, **kwargs):
        self.group = kwargs.pop("group", None)
        super(GroupForm, self).__init__(*args, **kwargs)
class LoginForm(GroupForm):
    """Authenticate a user by username or email, plus password.

    When ``EMAIL_AUTHENTICATION`` is enabled an ``email`` field is used in
    place of ``username``. ``clean()`` runs ``authenticate()`` and stores
    the resulting user on ``self.user`` for ``login()`` to consume.
    """

    password = forms.CharField(
        label = _("Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    remember = forms.BooleanField(
        label = _("Remember Me"),
        help_text = _("If checked you will stay logged in for 3 weeks"),
        required = False
    )

    # Set by clean() on successful authentication; None otherwise.
    user = None

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        ordering = []
        # Build the identifier field dynamically depending on the auth mode.
        if EMAIL_AUTHENTICATION:
            self.fields["email"] = forms.EmailField(
                label = ugettext("Email"),
            )
            ordering.append("email")
        else:
            self.fields["username"] = forms.CharField(
                label = ugettext("Username"),
                max_length = 30,
            )
            ordering.append("username")
        ordering.extend(["password", "remember"])
        self.fields.keyOrder = ordering

    def user_credentials(self):
        """
        Provides the credentials required to authenticate the user for
        login.
        """
        credentials = {}
        if EMAIL_AUTHENTICATION:
            credentials["email"] = self.cleaned_data["email"]
        else:
            credentials["username"] = self.cleaned_data["username"]
        credentials["password"] = self.cleaned_data["password"]
        return credentials

    def clean(self):
        # Skip authentication if field-level validation already failed.
        if self._errors:
            return
        user = authenticate(**self.user_credentials())
        if user:
            if user.is_active:
                self.user = user
            else:
                raise forms.ValidationError(_("This account is currently inactive."))
        else:
            if EMAIL_AUTHENTICATION:
                error = _("The email address and/or password you specified are not correct.")
            else:
                error = _("The username and/or password you specified are not correct.")
            raise forms.ValidationError(error)
        return self.cleaned_data

    def is_valid(self, *args, **kwargs):
        """Validate the form, emitting the login-attempt signal either way."""
        result = super(LoginForm, self).is_valid(*args, **kwargs)
        # Use .get() so a missing key (unbound form, tampered POST) does not
        # raise KeyError; consistent with SignupForm.is_valid below.
        if EMAIL_AUTHENTICATION:
            username = self.data.get("email")
        else:
            username = self.data.get("username")
        user_login_attempt.send(sender=LoginForm, username=username, result=result)
        return result

    def login(self, request):
        """Log ``self.user`` in and set session expiry per "remember me"."""
        perform_login(request, self.user)
        if self.cleaned_data["remember"]:
            # Keep the session alive for three weeks.
            request.session.set_expiry(60 * 60 * 24 * 7 * 3)
        else:
            # Expire the session when the browser closes.
            request.session.set_expiry(0)
class SignupForm(GroupForm):
    """Register a new user account.

    Collects username/password/email (field requirements depend on the
    ``ACCOUNT_*`` settings) and, on ``save()``, creates the ``User``,
    honours an optional friend-invitation confirmation key, and kicks off
    email verification when enabled.
    """

    username = forms.CharField(
        label = _("Username"),
        max_length = 30,
        widget = forms.TextInput()
    )
    password1 = forms.CharField(
        label = _("Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    password2 = forms.CharField(
        label = _("Password (again)"),
        widget = forms.PasswordInput(render_value=False)
    )
    email = forms.EmailField(widget=forms.TextInput())
    # Optional JoinInvitation confirmation key carried through the signup flow.
    confirmation_key = forms.CharField(
        max_length = 40,
        required = False,
        widget = forms.HiddenInput()
    )

    def __init__(self, *args, **kwargs):
        super(SignupForm, self).__init__(*args, **kwargs)
        # Email is mandatory whenever any email-based feature is enabled.
        if REQUIRED_EMAIL or EMAIL_VERIFICATION or EMAIL_AUTHENTICATION:
            self.fields["email"].label = ugettext("Email")
            self.fields["email"].required = True
        else:
            self.fields["email"].label = ugettext("Email (optional)")
            self.fields["email"].required = False

    def clean_username(self):
        # Restrict usernames to word characters and enforce case-insensitive
        # uniqueness against existing users.
        if not alnum_re.search(self.cleaned_data["username"]):
            raise forms.ValidationError(_("Usernames can only contain letters, numbers and underscores."))
        try:
            User.objects.get(username__iexact=self.cleaned_data["username"])
        except User.DoesNotExist:
            return self.cleaned_data["username"]
        raise forms.ValidationError(_("This username is already taken. Please choose another."))

    def clean_email(self):
        # Only enforce global email uniqueness when the settings demand it.
        value = self.cleaned_data["email"]
        if UNIQUE_EMAIL or EMAIL_AUTHENTICATION:
            try:
                User.objects.get(email__iexact=value)
            except User.DoesNotExist:
                return value
            raise forms.ValidationError(_("A user is registered with this email address."))
        return value

    def clean(self):
        # Cross-field check: both password entries must match.
        if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
            if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
                raise forms.ValidationError(_("You must type the same password each time."))
        return self.cleaned_data

    def create_user(self, username=None, commit=True):
        """Build (and optionally save) the ``User`` from cleaned form data."""
        user = User()
        if username is None:
            raise NotImplementedError("SignupForm.create_user does not handle "
                "username=None case. You must override this method.")
        user.username = username
        user.email = self.cleaned_data["email"].strip().lower()
        password = self.cleaned_data.get("password1")
        if password:
            user.set_password(password)
        else:
            # No password supplied (e.g. OpenID signup) -- block password auth.
            user.set_unusable_password()
        if commit:
            user.save()
        return user

    def login(self, request, user):
        # nasty hack to get get_user to work in Django
        user.backend = "django.contrib.auth.backends.ModelBackend"
        perform_login(request, user)

    def save(self, request=None):
        """Create the new user and their EmailAddress records.

        Handles three paths: invited with matching email (pre-verified),
        invited with a different email, and plain signup. Returns the new
        ``User``.
        """
        # don't assume a username is available. it is a common removal if
        # site developer wants to use email authentication.
        username = self.cleaned_data.get("username")
        email = self.cleaned_data["email"]
        if self.cleaned_data["confirmation_key"]:
            from friends.models import JoinInvitation # @@@ temporary fix for issue 93
            try:
                join_invitation = JoinInvitation.objects.get(confirmation_key=self.cleaned_data["confirmation_key"])
                confirmed = True
            except JoinInvitation.DoesNotExist:
                confirmed = False
        else:
            confirmed = False
        # @@@ clean up some of the repetition below -- DRY!
        if confirmed:
            if email == join_invitation.contact.email:
                # Invited address matches the signup address: trust it outright.
                new_user = self.create_user(username)
                join_invitation.accept(new_user) # should go before creation of EmailAddress below
                if request:
                    messages.add_message(request, messages.INFO,
                        ugettext(u"Your email address has already been verified")
                    )
                # already verified so can just create
                EmailAddress(user=new_user, email=email, verified=True, primary=True).save()
            else:
                # Invited, but signing up with a different address: that
                # address still needs its own confirmation email.
                new_user = self.create_user(username)
                join_invitation.accept(new_user) # should go before creation of EmailAddress below
                if email:
                    if request:
                        messages.add_message(request, messages.INFO,
                            ugettext(u"Confirmation email sent to %(email)s") % {
                                "email": email,
                            }
                        )
                    EmailAddress.objects.add_email(new_user, email)
        else:
            # Plain signup without an invitation.
            new_user = self.create_user(username)
            if email:
                if request and not EMAIL_VERIFICATION:
                    messages.add_message(request, messages.INFO,
                        ugettext(u"Confirmation email sent to %(email)s") % {
                            "email": email,
                        }
                    )
                EmailAddress.objects.add_email(new_user, email)
        if EMAIL_VERIFICATION:
            # Account stays inactive until the address is confirmed.
            new_user.is_active = False
            new_user.save()
        self.after_signup(new_user)
        return new_user

    def is_valid(self, *args, **kwargs):
        # Emit the sign-up-attempt signal whether or not validation passed.
        result = super(SignupForm, self).is_valid(*args, **kwargs)
        user_sign_up_attempt.send(
            sender=SignupForm,
            username=self.data.get("username"),
            email=self.data.get("email"),
            result=result
        )
        return result

    def after_signup(self, user, **kwargs):
        """
        An extension point for subclasses.
        """
        user_signed_up.send(sender=SignupForm, user=user)
class OpenIDSignupForm(SignupForm):
    """Signup form variant for OpenID registrations (no password fields)."""

    def __init__(self, *args, **kwargs):
        # remember provided (validated!) OpenID to attach it to the new user
        # later.
        self.openid = kwargs.pop("openid", None)
        # pop these off since they are passed to this method but we can't
        # pass them to forms.Form.__init__
        kwargs.pop("reserved_usernames", [])
        kwargs.pop("no_duplicate_emails", False)
        super(OpenIDSignupForm, self).__init__(*args, **kwargs)
        # these fields make no sense in OpenID
        for field_name in ("password1", "password2"):
            del self.fields[field_name]
class UserForm(forms.Form):
    """Base form bound to a particular ``User`` instance (or ``None``)."""

    def __init__(self, user=None, *args, **kwargs):
        # Keep a handle on the acting user for subclasses' clean/save logic.
        self.user = user
        super(UserForm, self).__init__(*args, **kwargs)
class AccountForm(UserForm):
    """UserForm that also resolves the user's ``Account`` record.

    If the user has no Account yet, an unsaved one is created so that
    subclasses can read defaults and persist it in ``save()``.
    """

    def __init__(self, *args, **kwargs):
        super(AccountForm, self).__init__(*args, **kwargs)
        try:
            account = Account.objects.get(user=self.user)
        except Account.DoesNotExist:
            account = Account(user=self.user)
        self.account = account
class AddEmailForm(UserForm):
    """Attach an additional email address to ``self.user``."""

    email = forms.EmailField(
        label = _("Email"),
        required = True,
        widget = forms.TextInput(attrs={"size": "30"})
    )

    def clean_email(self):
        value = self.cleaned_data["email"]
        errors = {
            "this_account": _("This email address already associated with this account."),
            "different_account": _("This email address already associated with another account."),
        }
        if UNIQUE_EMAIL:
            # Globally unique addresses: reject if anyone already has it.
            try:
                existing = EmailAddress.objects.get(email__iexact=value)
            except EmailAddress.DoesNotExist:
                return value
            if existing.user == self.user:
                raise forms.ValidationError(errors["this_account"])
            raise forms.ValidationError(errors["different_account"])
        # Otherwise only reject duplicates on this user's own account.
        try:
            EmailAddress.objects.get(user=self.user, email__iexact=value)
        except EmailAddress.DoesNotExist:
            return value
        raise forms.ValidationError(errors["this_account"])

    def save(self):
        """Create the EmailAddress record and return it."""
        return EmailAddress.objects.add_email(self.user, self.cleaned_data["email"])
class ChangePasswordForm(UserForm):
    """Replace the user's current password with a new one."""

    oldpassword = forms.CharField(
        label = _("Current Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    password1 = forms.CharField(
        label = _("New Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    password2 = forms.CharField(
        label = _("New Password (again)"),
        widget = forms.PasswordInput(render_value=False)
    )

    def clean_oldpassword(self):
        # The user must prove knowledge of the current password first.
        supplied = self.cleaned_data.get("oldpassword")
        if not self.user.check_password(supplied):
            raise forms.ValidationError(_("Please type your current password."))
        return self.cleaned_data["oldpassword"]

    def clean_password2(self):
        # Both new-password entries must match.
        data = self.cleaned_data
        if "password1" in data and "password2" in data:
            if data["password1"] != data["password2"]:
                raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        """Apply the new password via the account helper."""
        change_password(self.user, self.cleaned_data["password1"])
class SetPasswordForm(UserForm):
    """Set a password for a user who has none (no current-password check)."""

    password1 = forms.CharField(
        label = _("Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    password2 = forms.CharField(
        label = _("Password (again)"),
        widget = forms.PasswordInput(render_value=False)
    )

    def clean_password2(self):
        # Both entries must match.
        data = self.cleaned_data
        if "password1" in data and "password2" in data:
            if data["password1"] != data["password2"]:
                raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        """Hash and store the new password on the user."""
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
class ResetPasswordForm(forms.Form):
    """Send a password-reset email to every user matching the address."""

    email = forms.EmailField(
        label = _("Email"),
        required = True,
        widget = forms.TextInput(attrs={"size":"30"})
    )

    def clean_email(self):
        # Only verified addresses may trigger a reset email.
        verified = EmailAddress.objects.filter(
            email__iexact=self.cleaned_data["email"], verified=True
        )
        if verified.count() == 0:
            raise forms.ValidationError(_("Email address not verified for any user account"))
        return self.cleaned_data["email"]

    def save(self, **kwargs):
        """Generate a reset token per matching user and email it to them."""
        token_generator = kwargs.get("token_generator", default_token_generator)
        email = self.cleaned_data["email"]
        for user in User.objects.filter(email__iexact=email):
            temp_key = token_generator.make_token(user)

            # save it to the password reset model
            PasswordReset(user=user, temp_key=temp_key).save()

            domain = unicode(Site.objects.get_current().domain)

            # send the password reset email
            subject = _("Password reset email sent")
            message = render_to_string("account/password_reset_key_message.txt", {
                "user": user,
                "uid": int_to_base36(user.id),
                "temp_key": temp_key,
                "domain": domain,
            })
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
        return self.cleaned_data["email"]
class ResetPasswordKeyForm(forms.Form):
    """Set a new password for a user following a reset-key link."""

    password1 = forms.CharField(
        label = _("New Password"),
        widget = forms.PasswordInput(render_value=False)
    )
    password2 = forms.CharField(
        label = _("New Password (again)"),
        widget = forms.PasswordInput(render_value=False)
    )

    def __init__(self, *args, **kwargs):
        # The target user and the temp_key come from the reset URL, not the
        # submitted form data.
        self.user = kwargs.pop("user", None)
        self.temp_key = kwargs.pop("temp_key", None)
        super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        # Both entries must match.
        data = self.cleaned_data
        if "password1" in data and "password2" in data:
            if data["password1"] != data["password2"]:
                raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        # set the new user password
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
        # mark password reset object as reset
        PasswordReset.objects.filter(temp_key=self.temp_key).update(reset=True)
class ChangeTimezoneForm(AccountForm):
    """Edit the timezone stored on the user's Account."""

    timezone = TimeZoneField(label=_("Timezone"), required=True)

    def __init__(self, *args, **kwargs):
        super(ChangeTimezoneForm, self).__init__(*args, **kwargs)
        # Pre-populate the field from the current account value.
        self.initial.update({"timezone": self.account.timezone})

    def save(self):
        """Persist the chosen timezone on the account."""
        self.account.timezone = self.cleaned_data["timezone"]
        self.account.save()
class ChangeLanguageForm(AccountForm):
    """Edit the UI language stored on the user's Account."""

    language = forms.ChoiceField(
        label = _("Language"),
        required = True,
        choices = settings.LANGUAGES
    )

    def __init__(self, *args, **kwargs):
        super(ChangeLanguageForm, self).__init__(*args, **kwargs)
        # Pre-populate the field from the current account value.
        self.initial.update({"language": self.account.language})

    def save(self):
        """Persist the chosen language on the account."""
        self.account.language = self.cleaned_data["language"]
        self.account.save()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._log_analytics_operations import build_export_request_rate_by_interval_request_initial, build_export_throttled_requests_request_initial
T = TypeVar('T')
# Callback signature accepted via the `cls` kwarg: receives the pipeline
# response, the deserialized body, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LogAnalyticsOperations:
    """LogAnalyticsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2018_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _export_request_rate_by_interval_initial(
        self,
        location: str,
        parameters: "_models.RequestRateByIntervalInput",
        **kwargs: Any
    ) -> Optional["_models.LogAnalyticsOperationResult"]:
        # Initial call of the long-running operation: returns the
        # deserialized body on 200, or None when the service answers 202
        # (operation accepted, still running).
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'RequestRateByIntervalInput')

        request = build_export_request_rate_by_interval_request_initial(
            location=location,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._export_request_rate_by_interval_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _export_request_rate_by_interval_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval'}  # type: ignore


    @distributed_trace_async
    async def begin_export_request_rate_by_interval(
        self,
        location: str,
        parameters: "_models.RequestRateByIntervalInput",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.LogAnalyticsOperationResult"]:
        """Export logs that show Api requests made by this subscription in the given time window to show
        throttling activities.

        :param location: The location upon which virtual-machine-sizes is queried.
        :type location: str
        :param parameters: Parameters supplied to the LogAnalytics getRequestRateByInterval Api.
        :type parameters: ~azure.mgmt.compute.v2018_10_01.models.RequestRateByIntervalInput
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either LogAnalyticsOperationResult or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_10_01.models.LogAnalyticsOperationResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LogAnalyticsOperationResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial request. The identity `cls`
            # lambda keeps the raw pipeline response for the poller.
            raw_result = await self._export_request_rate_by_interval_initial(
                location=location,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM poller; False -> no polling; otherwise
        # caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an existing poller from previously saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_export_request_rate_by_interval.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval'}  # type: ignore

    async def _export_throttled_requests_initial(
        self,
        location: str,
        parameters: "_models.ThrottledRequestsInput",
        **kwargs: Any
    ) -> Optional["_models.LogAnalyticsOperationResult"]:
        # Initial call of the long-running operation: returns the
        # deserialized body on 200, or None when the service answers 202.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'ThrottledRequestsInput')

        request = build_export_throttled_requests_request_initial(
            location=location,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._export_throttled_requests_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _export_throttled_requests_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests'}  # type: ignore


    @distributed_trace_async
    async def begin_export_throttled_requests(
        self,
        location: str,
        parameters: "_models.ThrottledRequestsInput",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.LogAnalyticsOperationResult"]:
        """Export logs that show total throttled Api requests for this subscription in the given time
        window.

        :param location: The location upon which virtual-machine-sizes is queried.
        :type location: str
        :param parameters: Parameters supplied to the LogAnalytics getThrottledRequests Api.
        :type parameters: ~azure.mgmt.compute.v2018_10_01.models.ThrottledRequestsInput
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either LogAnalyticsOperationResult or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_10_01.models.LogAnalyticsOperationResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LogAnalyticsOperationResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial request. The identity `cls`
            # lambda keeps the raw pipeline response for the poller.
            raw_result = await self._export_throttled_requests_initial(
                location=location,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM poller; False -> no polling; otherwise
        # caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an existing poller from previously saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_export_throttled_requests.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests'}  # type: ignore
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `tf.data` unbatch over scalar, tuple, sparse, ragged and
  uint-typed elements, plus shape-mismatch error cases."""

  def testUnbatchWithUnknownRankInput(self):
    dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).unbatch()
    self.assertDatasetProduces(dataset, range(4))

  def testUnbatchScalarDataset(self):
    # batch(2) then unbatch() should round-trip scalars and preserve types.
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = (dtypes.int32,) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
    data = data.unbatch()
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))

    self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])

  def testUnbatchNestedDataset(self):
    # Elements that are themselves datasets survive unbatching.
    data = dataset_ops.Dataset.from_tensors(
        [dataset_ops.Dataset.range(10) for _ in range(10)])
    data = data.unbatch().flat_map(lambda x: x)
    self.assertDatasetProduces(data, list(range(10)) * 10)

  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
    data = data.unbatch()
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))

    self.assertDatasetProduces(
        data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])

  def testUnbatchDatasetWithSparseTensor(self):
    # A 10x10 diagonal sparse tensor, unbatched twice through a batch(5).
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors(st)
    data = data.unbatch()
    data = data.batch(5)
    data = data.unbatch()
    expected_output = [
        sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10)
    ]
    self.assertDatasetProduces(data, expected_output=expected_output)

  def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
    # Mixed structure: dense list + sparse + ragged in one element tuple.
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                            [[5]], [[6]], [[7]], [[8]], [[9]]])
    data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
    data = data.unbatch()
    data = data.batch(5)
    data = data.unbatch()
    expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
                        ragged_factory_ops.constant_value([[i]]))
                       for i in range(10)]
    self.assertDatasetProduces(
        data, expected_output=expected_output)

  def testUnbatchDatasetWithRaggedTensor(self):
    # batch(5).batch(2).unbatch() leaves batches of five ragged rows.
    rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                            [[5]], [[6]], [[7]], [[8]], [[9]]])
    data = dataset_ops.Dataset.from_tensors(rt)
    data = data.unbatch()
    data = data.batch(5)
    data = data.batch(2)
    data = data.unbatch()
    expected_output = [
        ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
        ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
    ]
    self.assertDatasetProduces(
        data, expected_output=expected_output)

  def testUnbatchSingleElementTupleDataset(self):
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
    data = data.unbatch()
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))

    self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])

  def testUnbatchMultiElementTupleDataset(self):
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types,
                        dataset_ops.get_legacy_output_types(data))
    data = data.unbatch()
    self.assertAllEqual(expected_types,
                        dataset_ops.get_legacy_output_types(data))

    self.assertDatasetProduces(
        data,
        [((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])

  def testUnbatchEmpty(self):
    # Zero-length leading dimension yields an empty dataset.
    data = dataset_ops.Dataset.from_tensors(
        (constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
         constant_op.constant([], shape=[0, 4, 0])))
    data = data.unbatch()
    self.assertDatasetProduces(data, [])

  def testUnbatchStaticShapeMismatch(self):
    # Statically-known mismatched leading dimensions fail at graph build time.
    data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
                                             np.arange(9)))
    with self.assertRaises(ValueError):
      data.unbatch()

  # Note: dynamic shape mismatch is graph specific test.
  @test_util.run_deprecated_v1
  def testSkipEagerUnbatchDynamicShapeMismatch(self):
    # Placeholders hide the mismatch until runtime, so the error surfaces
    # as InvalidArgumentError at iteration time.
    ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
    ph2 = array_ops.placeholder(dtypes.int32, shape=None)
    data = dataset_ops.Dataset.from_tensors((ph1, ph2))
    data = data.unbatch()
    iterator = dataset_ops.make_initializable_iterator(data)
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      # Mismatch in the 0th dimension.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: np.arange(8).astype(np.int32)
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)

      # No 0th dimension (i.e. scalar value) for one component.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: 7
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)

  def testUnbatchDatasetWithUintDtypes(self):
    # Values chosen to overflow the next-narrower uint type, proving the
    # dtype is preserved through batch/unbatch.
    components = (
        np.tile(np.array([[0], [1], [2], [3]], dtype=np.uint8), 2),
        np.tile(np.array([[1], [2], [3], [256]], dtype=np.uint16), 2),
        np.tile(np.array([[2], [3], [4], [65536]], dtype=np.uint32), 2),
        np.tile(np.array([[3], [4], [5], [4294967296]], dtype=np.uint64), 2),
    )
    expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
    expected_output = [tuple([c[i] for c in components]) for i in range(4)]

    data = dataset_ops.Dataset.from_tensor_slices(components)
    data = data.batch(2)
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))

    data = data.unbatch()
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
    self.assertDatasetProduces(data, expected_output)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import attributes
from kmip.core import enums
from kmip.core import misc
from kmip.core import objects
from kmip.core import secrets
from kmip.core import utils
from kmip.core.messages import payloads
class TestGetRequestPayload(testtools.TestCase):
"""
Test suite for the Get request payload.
"""
    def setUp(self):
        """Build the TTLV-encoded fixtures shared by the Get request tests."""
        super(TestGetRequestPayload, self).setUp()
        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Sections 3.1.3 and 14.1. The rest of the encoding was built by
        # hand.
        #
        # This encoding matches the following set of values:
        # Request Payload
        #     Unique Identifier - 49a1ca88-6bea-4fb2-b450-7e58802c3038
        #     Key Format Type - Raw
        #     Key Compression Type - EC Public Key Type Uncompressed
        #     Key Wrapping Specification
        #         Key Wrapping Method - Encrypt
        #         Encryption Key Information
        #             Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #             Cryptographic Parameters
        #                 Block Cipher Mode - NIST Key Wrap
        #         Encoding Option - No Encoding
        self.full_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\xC8'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
            b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
            b'\x33\x30\x33\x38\x00\x00\x00\x00'
            b'\x42\x00\x42\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x41\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x47\x01\x00\x00\x00\x70'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # Encoding obtained from the KMIP 1.1 testing document, Section 3.1.3.
        #
        # This encoding matches the following set of values:
        # Request Payload
        #     Unique Identifier - 49a1ca88-6bea-4fb2-b450-7e58802c3038
        self.partial_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
            b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
            b'\x33\x30\x33\x38\x00\x00\x00\x00'
        )
        # Encoding of an empty Request Payload structure, built by hand.
        self.empty_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\x00'
        )
    def tearDown(self):
        # No per-test cleanup is needed beyond the base class behavior.
        super(TestGetRequestPayload, self).tearDown()
def test_init(self):
"""
Test that a Get request payload can be constructed with no arguments.
"""
payload = payloads.GetRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.key_format_type)
self.assertEqual(None, payload.key_compression_type)
self.assertEqual(None, payload.key_wrapping_specification)
def test_init_with_args(self):
"""
Test that a Get request payload can be constructed with valid values.
"""
payload = payloads.GetRequestPayload(
unique_identifier='00000000-2222-4444-6666-888888888888',
key_format_type=enums.KeyFormatType.RAW,
key_compression_type=enums.KeyCompressionType.
EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
key_wrapping_specification=objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
)
self.assertEqual(
'00000000-2222-4444-6666-888888888888',
payload.unique_identifier
)
self.assertEqual(enums.KeyFormatType.RAW, payload.key_format_type)
self.assertEqual(
enums.KeyCompressionType.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
payload.key_compression_type
)
self.assertIsInstance(
payload.key_wrapping_specification,
objects.KeyWrappingSpecification
)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
payload.key_wrapping_specification.wrapping_method
)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a Get request payload.
"""
kwargs = {'unique_identifier': 0}
self.assertRaisesRegex(
TypeError,
"Unique identifier must be a string.",
payloads.GetRequestPayload,
**kwargs
)
args = (payloads.GetRequestPayload(), 'unique_identifier', 0)
self.assertRaisesRegex(
TypeError,
"Unique identifier must be a string.",
setattr,
*args
)
def test_invalid_key_format_type(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the key format type of a Get request payload.
"""
kwargs = {'key_format_type': 'invalid'}
self.assertRaisesRegex(
TypeError,
"Key format type must be a KeyFormatType enumeration.",
payloads.GetRequestPayload,
**kwargs
)
args = (payloads.GetRequestPayload(), 'key_format_type', 'invalid')
self.assertRaisesRegex(
TypeError,
"Key format type must be a KeyFormatType enumeration.",
setattr,
*args
)
def test_invalid_key_compression_type(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the key compression type of a Get request payload.
"""
kwargs = {'key_compression_type': 'invalid'}
self.assertRaisesRegex(
TypeError,
"Key compression type must be a KeyCompressionType enumeration.",
payloads.GetRequestPayload,
**kwargs
)
args = (
payloads.GetRequestPayload(),
'key_compression_type',
'invalid'
)
self.assertRaisesRegex(
TypeError,
"Key compression type must be a KeyCompressionType enumeration.",
setattr,
*args
)
def test_invalid_key_wrapping_specification(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the key wrapping specification of a Get request payload.
"""
kwargs = {'key_wrapping_specification': 'invalid'}
self.assertRaisesRegex(
TypeError,
"Key wrapping specification must be a KeyWrappingSpecification "
"struct.",
payloads.GetRequestPayload,
**kwargs
)
args = (
payloads.GetRequestPayload(),
'key_wrapping_specification',
'invalid'
)
self.assertRaisesRegex(
TypeError,
"Key wrapping specification must be a KeyWrappingSpecification "
"struct.",
setattr,
*args
)
    def test_read(self):
        """
        Test that a GetRequestPayload struct can be read from a data stream.
        """
        payload = payloads.GetRequestPayload()
        # All fields start out unset before decoding.
        self.assertEqual(None, payload.unique_identifier)
        self.assertEqual(None, payload.key_format_type)
        self.assertEqual(None, payload.key_compression_type)
        self.assertEqual(None, payload.key_wrapping_specification)
        payload.read(self.full_encoding)
        # Top-level fields decoded from the full encoding in setUp.
        self.assertEqual(
            '49a1ca88-6bea-4fb2-b450-7e58802c3038',
            payload.unique_identifier
        )
        self.assertEqual(enums.KeyFormatType.RAW, payload.key_format_type)
        self.assertEqual(
            enums.KeyCompressionType.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            payload.key_compression_type
        )
        self.assertIsInstance(
            payload.key_wrapping_specification,
            objects.KeyWrappingSpecification
        )
        # Drill into the nested key wrapping specification.
        k = payload.key_wrapping_specification
        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            k.wrapping_method
        )
        self.assertIsInstance(
            k.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = k.encryption_key_information
        self.assertEqual(
            '100182d5-72b8-47aa-8383-4d97d512e98a',
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertEqual(
            k.encoding_option,
            enums.EncodingOption.NO_ENCODING
        )
def test_read_partial(self):
"""
Test that a GetRequestPayload struct can be read from a partial data
stream.
"""
payload = payloads.GetRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.key_format_type)
self.assertEqual(None, payload.key_compression_type)
self.assertEqual(None, payload.key_wrapping_specification)
payload.read(self.partial_encoding)
self.assertEqual(
'49a1ca88-6bea-4fb2-b450-7e58802c3038',
payload.unique_identifier
)
self.assertEqual(None, payload.key_format_type)
self.assertEqual(None, payload.key_compression_type)
self.assertEqual(None, payload.key_wrapping_specification)
def test_read_empty(self):
"""
Test that a GetRequestPayload struct can be read from an empty data
stream.
"""
payload = payloads.GetRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.key_format_type)
self.assertEqual(None, payload.key_compression_type)
self.assertEqual(None, payload.key_wrapping_specification)
payload.read(self.empty_encoding)
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.key_format_type)
self.assertEqual(None, payload.key_compression_type)
self.assertEqual(None, payload.key_wrapping_specification)
    def test_write(self):
        """
        Test that a GetRequestPayload struct can be written to a data stream.
        """
        # Mirror the value set documented for full_encoding in setUp.
        payload = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        stream = utils.BytearrayStream()
        payload.write(stream)
        # Compare length first for a clearer failure, then the exact bytes.
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined GetRequestPayload struct can be written
to a data stream.
"""
payload = payloads.GetRequestPayload(
unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038'
)
stream = utils.BytearrayStream()
payload.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_empty(self):
"""
Test that an empty GetRequestPayload struct can be written to a data
stream.
"""
payload = payloads.GetRequestPayload()
stream = utils.BytearrayStream()
payload.write(stream)
self.assertEqual(len(self.empty_encoding), len(stream))
self.assertEqual(str(self.empty_encoding), str(stream))
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        GetRequestPayload structs with the same data.
        """
        # Two default-constructed payloads compare equal.
        a = payloads.GetRequestPayload()
        b = payloads.GetRequestPayload()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        # Two fully populated payloads built from identical values also
        # compare equal, in both comparison directions.
        a = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        b = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)
def test_equal_on_not_equal_unique_identifier(self):
"""
Test that the equality operator returns False when comparing two
GetRequestPayload structs with different unique identifiers.
"""
a = payloads.GetRequestPayload(
unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038'
)
b = payloads.GetRequestPayload(
unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c303f'
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_key_format_type(self):
"""
Test that the equality operator returns False when comparing two
GetRequestPayload structs with different key format types.
"""
a = payloads.GetRequestPayload(
key_format_type=enums.KeyFormatType.RAW
)
b = payloads.GetRequestPayload(
key_format_type=enums.KeyFormatType.OPAQUE
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_key_compression_type(self):
"""
Test that the equality operator returns False when comparing two
GetRequestPayload structs with different key compression types.
"""
a = payloads.GetRequestPayload(
key_compression_type=enums.KeyCompressionType.
EC_PUBLIC_KEY_TYPE_UNCOMPRESSED
)
b = payloads.GetRequestPayload(
key_compression_type=enums.KeyCompressionType.
EC_PUBLIC_KEY_TYPE_X9_62_HYBRID
)
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_key_wrapping_specification(self):
        """
        Test that the equality operator returns False when comparing two
        GetRequestPayload structs with different key wrapping specifications.
        """
        # The two specifications differ in the wrapping method and in the
        # encryption key's unique identifier.
        a = payloads.GetRequestPayload(
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT_THEN_MAC_SIGN,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-ffff-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        b = payloads.GetRequestPayload(
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
GetRequestPayload structs with different types.
"""
a = payloads.GetRequestPayload()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        GetRequestPayload structs with the same data.
        """
        # Two default-constructed payloads are not unequal.
        a = payloads.GetRequestPayload()
        b = payloads.GetRequestPayload()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        # The same holds for two fully populated payloads built from
        # identical values.
        a = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        b = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)
def test_not_equal_on_not_equal_unique_identifier(self):
"""
Test that the inequality operator returns True when comparing two
GetRequestPayload structs with different unique identifiers.
"""
a = payloads.GetRequestPayload(
unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038'
)
b = payloads.GetRequestPayload(
unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c303f'
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_key_format_type(self):
"""
Test that the inequality operator returns True when comparing two
GetRequestPayload structs with different key format types.
"""
a = payloads.GetRequestPayload(
key_format_type=enums.KeyFormatType.RAW
)
b = payloads.GetRequestPayload(
key_format_type=enums.KeyFormatType.OPAQUE
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_key_compression_type(self):
"""
Test that the equality operator returns False when comparing two
GetRequestPayload structs with different key compression types.
"""
a = payloads.GetRequestPayload(
key_compression_type=enums.KeyCompressionType.
EC_PUBLIC_KEY_TYPE_UNCOMPRESSED
)
b = payloads.GetRequestPayload(
key_compression_type=enums.KeyCompressionType.
EC_PUBLIC_KEY_TYPE_X9_62_HYBRID
)
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_not_equal_on_not_equal_key_wrapping_specification(self):
        """
        Test that the inequality operator returns True when comparing two
        GetRequestPayload structs with different key wrapping specifications.
        """
        # The two specifications differ in the wrapping method and in the
        # encryption key's unique identifier.
        a = payloads.GetRequestPayload(
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT_THEN_MAC_SIGN,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-ffff-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        b = payloads.GetRequestPayload(
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
GetRequestPayload structs with different types.
"""
a = payloads.GetRequestPayload()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_repr(self):
        """
        Test that repr can be applied to a GetRequestPayload struct.
        """
        payload = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        # The expected repr spells out every nested field, including the
        # unset CryptographicParameters attributes.
        expected = (
            "GetRequestPayload("
            "unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038', "
            "key_format_type=KeyFormatType.RAW, "
            "key_compression_type="
            "KeyCompressionType.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED, "
            "key_wrapping_specification="
            "KeyWrappingSpecification("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=None, "
            "attribute_names=None, "
            "encoding_option=EncodingOption.NO_ENCODING))"
        )
        observed = repr(payload)
        self.assertEqual(expected, observed)
    def test_str(self):
        """
        Test that str can be applied to a GetRequestPayload struct.
        """
        payload = payloads.GetRequestPayload(
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            key_format_type=enums.KeyFormatType.RAW,
            key_compression_type=enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            key_wrapping_specification=objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        )
        # The str form renders the payload as a dict of its fields.
        expected = str({
            'unique_identifier': '49a1ca88-6bea-4fb2-b450-7e58802c3038',
            'key_format_type': enums.KeyFormatType.RAW,
            'key_compression_type': enums.KeyCompressionType.
            EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,
            'key_wrapping_specification': objects.KeyWrappingSpecification(
                wrapping_method=enums.WrappingMethod.ENCRYPT,
                encryption_key_information=objects.EncryptionKeyInformation(
                    unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',
                    cryptographic_parameters=attributes.
                    CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                    )
                ),
                encoding_option=enums.EncodingOption.NO_ENCODING
            )
        })
        observed = str(payload)
        self.assertEqual(expected, observed)
class TestGetResponsePayload(testtools.TestCase):
"""
Test suite for the Get response payload.
"""
    def setUp(self):
        """Build the TTLV-encoded fixtures shared by the Get response tests."""
        super(TestGetResponsePayload, self).setUp()
        # Encoding obtained from the KMIP 1.1 testing document, Section 3.1.3.
        #
        # This encoding matches the following set of values:
        # Response Payload
        #     Object Type - Symmetric Key
        #     Unique Identifier - 49a1ca88-6bea-4fb2-b450-7e58802c3038
        #     Symmetric Key
        #         Key Block
        #             Key Format Type - Raw
        #             Key Value
        #                 Key Material - 0x7367578051012A6D134A855E25C8CD5E4C
        #                                A131455729D3C8
        #             Cryptographic Algorithm - 3DES
        #             Cryptographic Length - 168
        self.full_encoding = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\xA8'
            b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
            b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
            b'\x33\x30\x33\x38\x00\x00\x00\x00'
            b'\x42\x00\x8F\x01\x00\x00\x00\x60'
            b'\x42\x00\x40\x01\x00\x00\x00\x58'
            b'\x42\x00\x42\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x45\x01\x00\x00\x00\x20'
            b'\x42\x00\x43\x08\x00\x00\x00\x18'
            b'\x73\x67\x57\x80\x51\x01\x2A\x6D\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
            b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
            b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x2A\x02\x00\x00\x00\x04\x00\x00\x00\xA8\x00\x00\x00\x00'
        )
        # Same as the full encoding above, but with the object type field
        # removed (used to trigger a decode error).
        self.partial_encoding_missing_object_type = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\xA0'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
            b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
            b'\x33\x30\x33\x38\x00\x00\x00\x00'
            b'\x42\x00\x8F\x01\x00\x00\x00\x60'
            b'\x42\x00\x40\x01\x00\x00\x00\x58'
            b'\x42\x00\x42\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x45\x01\x00\x00\x00\x20'
            b'\x42\x00\x43\x08\x00\x00\x00\x18'
            b'\x73\x67\x57\x80\x51\x01\x2A\x6D\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
            b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
            b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x2A\x02\x00\x00\x00\x04\x00\x00\x00\xA8\x00\x00\x00\x00'
        )
        # Same as the full encoding above, but with the unique identifier
        # field removed.
        self.partial_encoding_missing_unique_id = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\x78'
            b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x8F\x01\x00\x00\x00\x60'
            b'\x42\x00\x40\x01\x00\x00\x00\x58'
            b'\x42\x00\x42\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x45\x01\x00\x00\x00\x20'
            b'\x42\x00\x43\x08\x00\x00\x00\x18'
            b'\x73\x67\x57\x80\x51\x01\x2A\x6D\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
            b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
            b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x2A\x02\x00\x00\x00\x04\x00\x00\x00\xA8\x00\x00\x00\x00'
        )
        # Same as the full encoding above, but with the secret (symmetric
        # key) structure removed.
        self.partial_encoding_missing_secret = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\x40'
            b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
            b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
            b'\x33\x30\x33\x38\x00\x00\x00\x00'
        )
    def tearDown(self):
        # No per-test cleanup is needed beyond the base class behavior.
        super(TestGetResponsePayload, self).tearDown()
def test_init(self):
"""
Test that a GetRequestPayload struct can be constructed with no
arguments.
"""
payload = payloads.GetResponsePayload()
self.assertEqual(None, payload.object_type)
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.secret)
    def test_init_with_args(self):
        """
        Test that a GetResponsePayload struct can be constructed with valid
        values.
        """
        payload = payloads.GetResponsePayload(
            object_type=enums.ObjectType.SYMMETRIC_KEY,
            unique_identifier='11111111-3333-5555-7777-999999999999',
            secret=secrets.SymmetricKey(
                key_block=objects.KeyBlock(
                    key_format_type=misc.KeyFormatType(
                        enums.KeyFormatType.RAW
                    ),
                    key_value=objects.KeyValue(
                        key_material=objects.KeyMaterial(
                            b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                            b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                            b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                        )
                    ),
                    cryptographic_algorithm=attributes.CryptographicAlgorithm(
                        enums.CryptographicAlgorithm.TRIPLE_DES
                    ),
                    cryptographic_length=attributes.CryptographicLength(168)
                )
            )
        )
        # Top-level fields should reflect the constructor arguments.
        self.assertEqual(
            enums.ObjectType.SYMMETRIC_KEY,
            payload.object_type
        )
        self.assertEqual(
            '11111111-3333-5555-7777-999999999999',
            payload.unique_identifier
        )
        # Verify each level of the nested symmetric key / key block.
        self.assertIsInstance(payload.secret, secrets.SymmetricKey)
        self.assertIsInstance(payload.secret.key_block, objects.KeyBlock)
        self.assertIsInstance(
            payload.secret.key_block.key_format_type,
            misc.KeyFormatType
        )
        self.assertEqual(
            enums.KeyFormatType.RAW,
            payload.secret.key_block.key_format_type.value
        )
        self.assertIsInstance(
            payload.secret.key_block.key_value,
            objects.KeyValue
        )
        self.assertIsInstance(
            payload.secret.key_block.key_value.key_material,
            objects.KeyMaterial
        )
        self.assertEqual(
            (
                b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
            ),
            payload.secret.key_block.key_value.key_material.value
        )
        self.assertIsInstance(
            payload.secret.key_block.cryptographic_algorithm,
            attributes.CryptographicAlgorithm
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.TRIPLE_DES,
            payload.secret.key_block.cryptographic_algorithm.value
        )
        self.assertIsInstance(
            payload.secret.key_block.cryptographic_length,
            attributes.CryptographicLength
        )
        self.assertEqual(
            168,
            payload.secret.key_block.cryptographic_length.value
        )
def test_invalid_object_type(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the object type of a GetResponsePayload struct.
"""
kwargs = {'object_type': 'invalid'}
self.assertRaisesRegex(
TypeError,
"Object type must be an ObjectType enumeration.",
payloads.GetResponsePayload,
**kwargs
)
args = (payloads.GetResponsePayload(), 'object_type', 'invalid')
self.assertRaisesRegex(
TypeError,
"Object type must be an ObjectType enumeration.",
setattr,
*args
)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a GetResponsePayload struct.
"""
kwargs = {'unique_identifier': 0}
self.assertRaisesRegex(
TypeError,
"Unique identifier must be a string.",
payloads.GetResponsePayload,
**kwargs
)
args = (payloads.GetResponsePayload(), 'unique_identifier', 0)
self.assertRaisesRegex(
TypeError,
"Unique identifier must be a string.",
setattr,
*args
)
def test_invalid_secret(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the secret of a GetResponsePayload struct.
"""
kwargs = {'secret': 0}
self.assertRaisesRegex(
TypeError,
"Secret must be one of the following structs: Certificate, "
"OpaqueObject, PrivateKey, PublicKey, SecretData, SplitKey, "
"SymmetricKey, Template",
payloads.GetResponsePayload,
**kwargs
)
args = (payloads.GetResponsePayload(), 'secret', 0)
self.assertRaisesRegex(
TypeError,
"Secret must be one of the following structs: Certificate, "
"OpaqueObject, PrivateKey, PublicKey, SecretData, SplitKey, "
"SymmetricKey, Template",
setattr,
*args
)
    def test_read(self):
        """
        Test that a GetResponsePayload struct can be read from a data stream.
        """
        payload = payloads.GetResponsePayload()
        # All fields start out unset before decoding.
        self.assertEqual(None, payload.object_type)
        self.assertEqual(None, payload.unique_identifier)
        self.assertEqual(None, payload.secret)
        payload.read(self.full_encoding)
        # Top-level fields decoded from the full encoding in setUp.
        self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, payload.object_type)
        self.assertEqual(
            '49a1ca88-6bea-4fb2-b450-7e58802c3038',
            payload.unique_identifier
        )
        # Verify each level of the nested symmetric key / key block.
        self.assertIsInstance(payload.secret, secrets.SymmetricKey)
        self.assertIsInstance(payload.secret.key_block, objects.KeyBlock)
        self.assertIsInstance(
            payload.secret.key_block.key_format_type,
            misc.KeyFormatType
        )
        self.assertEqual(
            enums.KeyFormatType.RAW,
            payload.secret.key_block.key_format_type.value
        )
        self.assertIsInstance(
            payload.secret.key_block.key_value,
            objects.KeyValue
        )
        self.assertIsInstance(
            payload.secret.key_block.key_value.key_material,
            objects.KeyMaterial
        )
        self.assertEqual(
            (
                b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
            ),
            payload.secret.key_block.key_value.key_material.value
        )
        self.assertIsInstance(
            payload.secret.key_block.cryptographic_algorithm,
            attributes.CryptographicAlgorithm
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.TRIPLE_DES,
            payload.secret.key_block.cryptographic_algorithm.value
        )
        self.assertIsInstance(
            payload.secret.key_block.cryptographic_length,
            attributes.CryptographicLength
        )
        self.assertEqual(
            168,
            payload.secret.key_block.cryptographic_length.value
        )
def test_read_missing_object_type(self):
"""
Test that a ValueError gets raised when a required GetResponsePayload
field is missing when decoding the struct.
"""
payload = payloads.GetResponsePayload()
args = (self.partial_encoding_missing_object_type, )
self.assertRaisesRegex(
ValueError,
"Parsed payload encoding is missing the object type field.",
payload.read,
*args
)
def test_read_missing_unique_identifier(self):
"""
Test that a ValueError gets raised when a required GetResponsePayload
field is missing when decoding the struct.
"""
payload = payloads.GetResponsePayload()
args = (self.partial_encoding_missing_unique_id, )
self.assertRaisesRegex(
ValueError,
"Parsed payload encoding is missing the unique identifier field.",
payload.read,
*args
)
def test_read_missing_secret(self):
"""
Test that a ValueError gets raised when a required GetResponsePayload
field is missing when decoding the struct.
"""
payload = payloads.GetResponsePayload()
args = (self.partial_encoding_missing_secret, )
self.assertRaisesRegex(
ValueError,
"Parsed payload encoding is missing the secret field.",
payload.read,
*args
)
    def test_write(self):
        """
        Test that a GetResponsePayload struct can be written to a data stream.
        """
        # Mirror the value set documented for full_encoding in setUp.
        payload = payloads.GetResponsePayload(
            object_type=enums.ObjectType.SYMMETRIC_KEY,
            unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
            secret=secrets.SymmetricKey(
                key_block=objects.KeyBlock(
                    key_format_type=misc.KeyFormatType(
                        enums.KeyFormatType.RAW
                    ),
                    key_value=objects.KeyValue(
                        key_material=objects.KeyMaterial(
                            b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                            b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                            b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                        )
                    ),
                    cryptographic_algorithm=attributes.CryptographicAlgorithm(
                        enums.CryptographicAlgorithm.TRIPLE_DES
                    ),
                    cryptographic_length=attributes.CryptographicLength(168)
                )
            )
        )
        stream = utils.BytearrayStream()
        payload.write(stream)
        # Compare length first for a clearer failure, then the exact bytes.
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))
def test_write_missing_object_type(self):
    """
    Verify that encoding a GetResponsePayload without an object type
    raises a descriptive ValueError.
    """
    payload = payloads.GetResponsePayload()
    buffer = utils.BytearrayStream()
    with self.assertRaisesRegex(
            ValueError,
            "Payload is missing the object type field."):
        payload.write(buffer)
def test_write_missing_unique_identifier(self):
    """
    Verify that encoding a GetResponsePayload without a unique
    identifier raises a descriptive ValueError.
    """
    payload = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY
    )
    buffer = utils.BytearrayStream()
    with self.assertRaisesRegex(
            ValueError,
            "Payload is missing the unique identifier field."):
        payload.write(buffer)
def test_write_missing_secret(self):
    """
    Verify that encoding a GetResponsePayload without a secret raises
    a descriptive ValueError.
    """
    payload = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038'
    )
    buffer = utils.BytearrayStream()
    with self.assertRaisesRegex(
            ValueError,
            "Payload is missing the secret field."):
        payload.write(buffer)
def test_equal_on_equal(self):
    """
    Verify that the equality operator returns True when comparing two
    GetResponsePayload structs holding the same data.
    """
    a = payloads.GetResponsePayload()
    b = payloads.GetResponsePayload()
    self.assertTrue(a == b)
    self.assertTrue(b == a)
    # TODO (peter-hamilton): Update this once equality is supported for
    # SymmetricKeys.
    shared_secret = secrets.SymmetricKey(
        key_block=objects.KeyBlock(
            key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
            key_value=objects.KeyValue(
                key_material=objects.KeyMaterial(
                    b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                    b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                    b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                )
            ),
            cryptographic_algorithm=attributes.CryptographicAlgorithm(
                enums.CryptographicAlgorithm.TRIPLE_DES
            ),
            cryptographic_length=attributes.CryptographicLength(168)
        )
    )
    # The same secret object is deliberately shared by both payloads.
    a = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=shared_secret
    )
    b = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=shared_secret
    )
    self.assertTrue(a == b)
    self.assertTrue(b == a)
def test_equal_on_not_equal_object_type(self):
    """
    Verify that == yields False for two GetResponsePayload structs
    whose object type fields differ.
    """
    a = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY)
    b = payloads.GetResponsePayload(
        object_type=enums.ObjectType.OPAQUE_DATA)
    self.assertFalse(a == b)
    self.assertFalse(b == a)
def test_equal_on_not_equal_unique_identifier(self):
    """
    Verify that == yields False for two GetResponsePayload structs
    whose unique identifier fields differ.
    """
    a = payloads.GetResponsePayload(
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038')
    b = payloads.GetResponsePayload(
        unique_identifier='49a1ca88-6bea-4fb2-ffff-7e58802c3038')
    self.assertFalse(a == b)
    self.assertFalse(b == a)
def test_equal_on_not_equal_secrets(self):
    """
    Verify that == yields False for two GetResponsePayload structs
    whose secret fields are distinct objects.
    """
    # TODO (peter-hamilton): Update this test case once SymmetricKeys
    # support proper field-based equality.
    def build_key():
        # Build a fresh SymmetricKey each time; without field-based
        # equality, two identical keys still compare as different.
        return secrets.SymmetricKey(
            key_block=objects.KeyBlock(
                key_format_type=misc.KeyFormatType(
                    enums.KeyFormatType.RAW
                ),
                key_value=objects.KeyValue(
                    key_material=objects.KeyMaterial(
                        b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                        b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                        b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                    )
                ),
                cryptographic_algorithm=attributes.CryptographicAlgorithm(
                    enums.CryptographicAlgorithm.TRIPLE_DES
                ),
                cryptographic_length=attributes.CryptographicLength(168)
            )
        )
    a = payloads.GetResponsePayload(secret=build_key())
    b = payloads.GetResponsePayload(secret=build_key())
    self.assertFalse(a == b)
    self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
    """
    Verify that comparing a GetResponsePayload against an object of a
    completely different type yields False for ==.
    """
    payload = payloads.GetResponsePayload()
    other = 'invalid'
    self.assertFalse(payload == other)
    self.assertFalse(other == payload)
def test_not_equal_on_equal(self):
    """
    Verify that the inequality operator returns False when comparing
    two GetResponsePayload structs holding the same data.
    """
    a = payloads.GetResponsePayload()
    b = payloads.GetResponsePayload()
    self.assertFalse(a != b)
    self.assertFalse(b != a)
    # TODO (peter-hamilton): Update this once equality is supported for
    # SymmetricKeys.
    shared_secret = secrets.SymmetricKey(
        key_block=objects.KeyBlock(
            key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
            key_value=objects.KeyValue(
                key_material=objects.KeyMaterial(
                    b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                    b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                    b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                )
            ),
            cryptographic_algorithm=attributes.CryptographicAlgorithm(
                enums.CryptographicAlgorithm.TRIPLE_DES
            ),
            cryptographic_length=attributes.CryptographicLength(168)
        )
    )
    # The same secret object is deliberately shared by both payloads.
    a = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=shared_secret
    )
    b = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=shared_secret
    )
    self.assertFalse(a != b)
    self.assertFalse(b != a)
def test_not_equal_on_not_equal_object_type(self):
    """
    Verify that != yields True for two GetResponsePayload structs
    whose object type fields differ.
    """
    a = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY)
    b = payloads.GetResponsePayload(
        object_type=enums.ObjectType.OPAQUE_DATA)
    self.assertTrue(a != b)
    self.assertTrue(b != a)
def test_not_equal_on_not_equal_unique_identifier(self):
    """
    Verify that != yields True for two GetResponsePayload structs
    whose unique identifier fields differ.
    """
    a = payloads.GetResponsePayload(
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038')
    b = payloads.GetResponsePayload(
        unique_identifier='49a1ca88-6bea-4fb2-ffff-7e58802c3038')
    self.assertTrue(a != b)
    self.assertTrue(b != a)
def test_not_equal_on_not_equal_secrets(self):
    """
    Verify that != yields True for two GetResponsePayload structs
    whose secret fields are distinct objects.
    """
    # TODO (peter-hamilton): Update this test case once SymmetricKeys
    # support proper field-based equality.
    def build_key():
        # Build a fresh SymmetricKey each time; without field-based
        # equality, two identical keys still compare as different.
        return secrets.SymmetricKey(
            key_block=objects.KeyBlock(
                key_format_type=misc.KeyFormatType(
                    enums.KeyFormatType.RAW
                ),
                key_value=objects.KeyValue(
                    key_material=objects.KeyMaterial(
                        b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                        b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                        b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                    )
                ),
                cryptographic_algorithm=attributes.CryptographicAlgorithm(
                    enums.CryptographicAlgorithm.TRIPLE_DES
                ),
                cryptographic_length=attributes.CryptographicLength(168)
            )
        )
    a = payloads.GetResponsePayload(secret=build_key())
    b = payloads.GetResponsePayload(secret=build_key())
    self.assertTrue(a != b)
    self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
    """
    Verify that comparing a GetResponsePayload against an object of a
    completely different type yields True for !=.
    """
    payload = payloads.GetResponsePayload()
    other = 'invalid'
    self.assertTrue(payload != other)
    self.assertTrue(other != payload)
def test_repr(self):
    """
    Test that repr can be applied to a GetResponsePayload struct.
    """
    payload = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=secrets.SymmetricKey(
            key_block=objects.KeyBlock(
                key_format_type=misc.KeyFormatType(
                    enums.KeyFormatType.RAW
                ),
                key_value=objects.KeyValue(
                    key_material=objects.KeyMaterial(
                        b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                        b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                        b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                    )
                ),
                cryptographic_algorithm=attributes.CryptographicAlgorithm(
                    enums.CryptographicAlgorithm.TRIPLE_DES
                ),
                cryptographic_length=attributes.CryptographicLength(168)
            )
        )
    )
    # TODO (peter-hamilton): Update the secret portion once SymmetricKeys
    # support repr/str.
    # The secret is expected to render as the generic "Struct()" per the
    # fixture below, since SymmetricKey repr support is still pending.
    expected = (
        "GetResponsePayload("
        "object_type=ObjectType.SYMMETRIC_KEY, "
        "unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038', "
        "secret=Struct()"
        ")"
    )
    observed = repr(payload)
    self.assertEqual(expected, observed)
def test_str(self):
    """
    Verify the str representation of a populated GetResponsePayload.
    """
    secret = secrets.SymmetricKey(
        key_block=objects.KeyBlock(
            key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
            key_value=objects.KeyValue(
                key_material=objects.KeyMaterial(
                    b'\x73\x67\x57\x80\x51\x01\x2A\x6D'
                    b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
                    b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
                )
            ),
            cryptographic_algorithm=attributes.CryptographicAlgorithm(
                enums.CryptographicAlgorithm.TRIPLE_DES
            ),
            cryptographic_length=attributes.CryptographicLength(168)
        )
    )
    payload = payloads.GetResponsePayload(
        object_type=enums.ObjectType.SYMMETRIC_KEY,
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',
        secret=secret
    )
    # TODO (peter-hamilton): Update the secret portion once SymmetricKeys
    # support repr/str.
    expected = str({
        'object_type': enums.ObjectType.SYMMETRIC_KEY,
        'unique_identifier': '49a1ca88-6bea-4fb2-b450-7e58802c3038',
        'secret': secret
    })
    self.assertEqual(expected, str(payload))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import neutron
# Module-level shortcut to the neutron client factory; every API call
# below goes through neutronclient(request).
neutronclient = neutron.neutronclient
class IKEPolicy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPN IKEPolicy.

    The constructor is inherited from NeutronAPIDictWrapper; an override
    that only delegated to super() added nothing and was removed.
    """
class IPSecPolicy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPN IPSecPolicy.

    The constructor is inherited from NeutronAPIDictWrapper; an override
    that only delegated to super() added nothing and was removed.
    """
class IPSecSiteConnection(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron IPSecSiteConnection.

    The constructor is inherited from NeutronAPIDictWrapper; an override
    that only delegated to super() added nothing and was removed.
    """
class VPNService(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPNService.

    The constructor is inherited from NeutronAPIDictWrapper; an override
    that only delegated to super() added nothing and was removed.
    """
def vpnservice_create(request, **kwargs):
    """Create a VPNService.

    :param request: request context
    :param admin_state_up: admin state (default on)
    :param name: name for VPNService
    :param description: description for VPNService
    :param router_id: router id for router of VPNService
    :param subnet_id: subnet id for subnet of VPNService
    """
    fields = ('admin_state_up', 'name', 'description',
              'router_id', 'subnet_id')
    # Copy exactly the expected keys out of kwargs into the request body.
    body = {'vpnservice': dict((field, kwargs[field]) for field in fields)}
    vpnservice = neutronclient(request).create_vpnservice(body).get(
        'vpnservice')
    return VPNService(vpnservice)
def vpnservice_list(request, **kwargs):
    """List VPNServices with subnet, router and connection info expanded."""
    return _vpnservice_list(request,
                            expand_subnet=True,
                            expand_router=True,
                            expand_conns=True,
                            **kwargs)
def _vpnservice_list(request, expand_subnet=False, expand_router=False,
                     expand_conns=False, **kwargs):
    """List VPNServices, optionally annotating each entry.

    :param expand_subnet: add ``subnet_name`` (the subnet's CIDR)
    :param expand_router: add ``router_name``
    :param expand_conns: add ``ipsecsiteconns`` (ids of the IPSec site
        connections referencing each service)
    """
    vpnservices = neutronclient(request).list_vpnservices(
        **kwargs).get('vpnservices')
    if expand_subnet:
        subnets = neutron.subnet_list(request)
        subnet_dict = SortedDict((s.id, s) for s in subnets)
        for s in vpnservices:
            subnet = subnet_dict.get(s['subnet_id'])
            # Guard against a subnet deleted out from under the service;
            # dereferencing the .get() result directly raised
            # AttributeError on a missing entry.
            s['subnet_name'] = subnet.cidr if subnet else None
    if expand_router:
        routers = neutron.router_list(request)
        router_dict = SortedDict((r.id, r) for r in routers)
        for s in vpnservices:
            router = router_dict.get(s['router_id'])
            # Same guard as above for a missing router.
            s['router_name'] = router.name_or_id if router else None
    if expand_conns:
        ipsecsiteconns = _ipsecsiteconnection_list(request, **kwargs)
        for s in vpnservices:
            s['ipsecsiteconns'] = [c.id for c in ipsecsiteconns
                                   if c.vpnservice_id == s['id']]
    return [VPNService(v) for v in vpnservices]
def vpnservice_get(request, vpnservice_id):
    """Fetch one VPNService with subnet, router and connections embedded."""
    return _vpnservice_get(request, vpnservice_id,
                           expand_subnet=True,
                           expand_router=True,
                           expand_conns=True)
def _vpnservice_get(request, vpnservice_id, expand_subnet=False,
                    expand_router=False, expand_conns=False):
    """Fetch a single VPNService, optionally embedding related objects."""
    vpnservice = neutronclient(request).show_vpnservice(
        vpnservice_id).get('vpnservice')
    if expand_subnet:
        # Embed the full subnet object, not just its id.
        vpnservice['subnet'] = neutron.subnet_get(
            request, vpnservice['subnet_id'])
    if expand_router:
        vpnservice['router'] = neutron.router_get(
            request, vpnservice['router_id'])
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        vpnservice['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.vpnservice_id == vpnservice['id']
        ]
    return VPNService(vpnservice)
def vpnservice_update(request, vpnservice_id, **kwargs):
    """Update a VPNService; kwargs are passed through as the update body."""
    result = neutronclient(request).update_vpnservice(vpnservice_id, kwargs)
    return VPNService(result.get('vpnservice'))
def vpnservice_delete(request, vpnservice_id):
    """Delete the VPNService identified by ``vpnservice_id``."""
    neutronclient(request).delete_vpnservice(vpnservice_id)
def ikepolicy_create(request, **kwargs):
    """Create an IKEPolicy.

    :param request: request context
    :param name: name for IKEPolicy
    :param description: description for IKEPolicy
    :param auth_algorithm: authorization algorithm for IKEPolicy
    :param encryption_algorithm: encryption algorithm for IKEPolicy
    :param ike_version: IKE version for IKEPolicy
    :param lifetime: Lifetime Units and Value for IKEPolicy
    :param pfs: Perfect Forward Secrecy for IKEPolicy
    :param phase1_negotiation_mode: IKE Phase1 negotiation mode for IKEPolicy
    """
    fields = ('name', 'description', 'auth_algorithm',
              'encryption_algorithm', 'ike_version', 'lifetime',
              'pfs', 'phase1_negotiation_mode')
    # Copy exactly the expected keys out of kwargs into the request body.
    body = {'ikepolicy': dict((field, kwargs[field]) for field in fields)}
    ikepolicy = neutronclient(request).create_ikepolicy(body).get(
        'ikepolicy')
    return IKEPolicy(ikepolicy)
def ikepolicy_list(request, **kwargs):
    """List IKEPolicies with their referencing connection ids expanded."""
    return _ikepolicy_list(request, expand_conns=True, **kwargs)
def _ikepolicy_list(request, expand_conns=False, **kwargs):
    """List IKEPolicies, optionally annotating each with the ids of the
    IPSec site connections that reference it.
    """
    policies = neutronclient(request).list_ikepolicies(
        **kwargs).get('ikepolicies')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request, **kwargs)
        for policy in policies:
            policy['ipsecsiteconns'] = [
                conn.id for conn in conns
                if conn.ikepolicy_id == policy['id']
            ]
    return [IKEPolicy(policy) for policy in policies]
def ikepolicy_get(request, ikepolicy_id):
    """Fetch one IKEPolicy with its referencing connections embedded."""
    return _ikepolicy_get(request, ikepolicy_id, expand_conns=True)
def _ikepolicy_get(request, ikepolicy_id, expand_conns=False):
    """Fetch a single IKEPolicy, optionally embedding the IPSec site
    connections that reference it.
    """
    ikepolicy = neutronclient(request).show_ikepolicy(
        ikepolicy_id).get('ikepolicy')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        ikepolicy['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.ikepolicy_id == ikepolicy['id']
        ]
    return IKEPolicy(ikepolicy)
def ikepolicy_update(request, ikepolicy_id, **kwargs):
    """Update an IKEPolicy; kwargs are passed through as the update body."""
    ikepolicy = neutronclient(request).update_ikepolicy(
        ikepolicy_id, kwargs).get('ikepolicy')
    return IKEPolicy(ikepolicy)
def ikepolicy_delete(request, ikepolicy_id):
    """Delete the IKEPolicy identified by ``ikepolicy_id``."""
    neutronclient(request).delete_ikepolicy(ikepolicy_id)
def ipsecpolicy_create(request, **kwargs):
    """Create an IPSecPolicy.

    :param request: request context
    :param name: name for IPSecPolicy
    :param description: description for IPSecPolicy
    :param auth_algorithm: authorization algorithm for IPSecPolicy
    :param encapsulation_mode: encapsulation mode for IPSecPolicy
    :param encryption_algorithm: encryption algorithm for IPSecPolicy
    :param lifetime: Lifetime Units and Value for IPSecPolicy
    :param pfs: Perfect Forward Secrecy for IPSecPolicy
    :param transform_protocol: Transform Protocol for IPSecPolicy
    """
    fields = ('name', 'description', 'auth_algorithm',
              'encapsulation_mode', 'encryption_algorithm', 'lifetime',
              'pfs', 'transform_protocol')
    # Copy exactly the expected keys out of kwargs into the request body.
    body = {'ipsecpolicy': dict((field, kwargs[field]) for field in fields)}
    ipsecpolicy = neutronclient(request).create_ipsecpolicy(body).get(
        'ipsecpolicy')
    return IPSecPolicy(ipsecpolicy)
def ipsecpolicy_list(request, **kwargs):
    """List IPSecPolicies with their referencing connection ids expanded."""
    return _ipsecpolicy_list(request, expand_conns=True, **kwargs)
def _ipsecpolicy_list(request, expand_conns=False, **kwargs):
    """List IPSecPolicies, optionally annotating each with the ids of
    the IPSec site connections that reference it.
    """
    policies = neutronclient(request).list_ipsecpolicies(
        **kwargs).get('ipsecpolicies')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request, **kwargs)
        for policy in policies:
            policy['ipsecsiteconns'] = [
                conn.id for conn in conns
                if conn.ipsecpolicy_id == policy['id']
            ]
    return [IPSecPolicy(policy) for policy in policies]
def ipsecpolicy_get(request, ipsecpolicy_id):
    """Fetch one IPSecPolicy with its referencing connections embedded."""
    return _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=True)
def _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=False):
    """Fetch a single IPSecPolicy, optionally embedding the IPSec site
    connections that reference it.
    """
    ipsecpolicy = neutronclient(request).show_ipsecpolicy(
        ipsecpolicy_id).get('ipsecpolicy')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        ipsecpolicy['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.ipsecpolicy_id == ipsecpolicy['id']
        ]
    return IPSecPolicy(ipsecpolicy)
def ipsecpolicy_update(request, ipsecpolicy_id, **kwargs):
    """Update an IPSecPolicy; kwargs are passed through as the update body."""
    ipsecpolicy = neutronclient(request).update_ipsecpolicy(
        ipsecpolicy_id, kwargs).get('ipsecpolicy')
    return IPSecPolicy(ipsecpolicy)
def ipsecpolicy_delete(request, ipsecpolicy_id):
    """Delete the IPSecPolicy identified by ``ipsecpolicy_id``."""
    neutronclient(request).delete_ipsecpolicy(ipsecpolicy_id)
def ipsecsiteconnection_create(request, **kwargs):
    """Create an IPSecSiteConnection.

    :param request: request context
    :param name: name for IPSecSiteConnection
    :param description: description for IPSecSiteConnection
    :param dpd: dead peer detection action, interval and timeout
    :param ikepolicy_id: IKEPolicy associated with this connection
    :param initiator: initiator state
    :param ipsecpolicy_id: IPsecPolicy associated with this connection
    :param mtu: MTU size for the connection
    :param peer_address: Peer gateway public address
    :param peer_cidrs: remote subnet(s) in CIDR format
    :param peer_id: Peer router identity for authentication
    :param psk: Pre-Shared Key string
    :param vpnservice_id: VPNService associated with this connection
    :param admin_state_up: admin state (default on)
    """
    fields = ('name', 'description', 'dpd', 'ikepolicy_id', 'initiator',
              'ipsecpolicy_id', 'mtu', 'peer_address', 'peer_cidrs',
              'peer_id', 'psk', 'vpnservice_id', 'admin_state_up')
    # Copy exactly the expected keys out of kwargs into the request body.
    body = {'ipsec_site_connection':
            dict((field, kwargs[field]) for field in fields)}
    ipsecsiteconnection = neutronclient(request).create_ipsec_site_connection(
        body).get('ipsec_site_connection')
    return IPSecSiteConnection(ipsecsiteconnection)
@memoized
def ipsecsiteconnection_list(request, **kwargs):
    """List IPSec site connections with policy/service names expanded.

    NOTE(review): results are memoized per argument set for the lifetime
    of the process — presumably intended as a per-request cache; confirm
    stale listings are acceptable for callers.
    """
    return _ipsecsiteconnection_list(request, expand_ikepolicies=True,
                                     expand_ipsecpolicies=True,
                                     expand_vpnservices=True, **kwargs)
@memoized
def _ipsecsiteconnection_list(request, expand_ikepolicies=False,
                              expand_ipsecpolicies=False,
                              expand_vpnservices=False, **kwargs):
    """List IPSec site connections, optionally annotating each with the
    display names of its IKE policy, IPSec policy and VPN service.

    Results are memoized per argument set for the process lifetime.
    """
    ipsecsiteconnections = neutronclient(request).list_ipsec_site_connections(
        **kwargs).get('ipsec_site_connections')
    if expand_ikepolicies:
        ikepolicies = _ikepolicy_list(request, **kwargs)
        policy_dict = SortedDict((p.id, p) for p in ikepolicies)
        for c in ipsecsiteconnections:
            policy = policy_dict.get(c['ikepolicy_id'])
            # Guard against a policy removed concurrently; dereferencing
            # the .get() result directly raised AttributeError on None.
            c['ikepolicy_name'] = policy.name_or_id if policy else None
    if expand_ipsecpolicies:
        ipsecpolicies = _ipsecpolicy_list(request, **kwargs)
        policy_dict = SortedDict((p.id, p) for p in ipsecpolicies)
        for c in ipsecsiteconnections:
            policy = policy_dict.get(c['ipsecpolicy_id'])
            c['ipsecpolicy_name'] = policy.name_or_id if policy else None
    if expand_vpnservices:
        vpnservices = _vpnservice_list(request, **kwargs)
        service_dict = SortedDict((s.id, s) for s in vpnservices)
        for c in ipsecsiteconnections:
            service = service_dict.get(c['vpnservice_id'])
            c['vpnservice_name'] = service.name_or_id if service else None
    return [IPSecSiteConnection(v) for v in ipsecsiteconnections]
def ipsecsiteconnection_get(request, ipsecsiteconnection_id):
    """Fetch one IPSec site connection with all related objects embedded."""
    return _ipsecsiteconnection_get(
        request, ipsecsiteconnection_id,
        expand_ikepolicies=True,
        expand_ipsecpolicies=True,
        expand_vpnservices=True)
def _ipsecsiteconnection_get(request, ipsecsiteconnection_id,
                             expand_ikepolicies=False,
                             expand_ipsecpolicies=False,
                             expand_vpnservices=False):
    """Fetch a single IPSec site connection, optionally embedding its
    IKE policy, IPSec policy, and VPN service.

    Defaults of False were added to the expand flags for consistency
    with the other private ``_*_get``/``_*_list`` helpers in this
    module; existing callers pass all three explicitly, so behavior is
    unchanged.
    """
    ipsecsiteconnection = neutronclient(request).show_ipsec_site_connection(
        ipsecsiteconnection_id).get('ipsec_site_connection')
    if expand_ikepolicies:
        ipsecsiteconnection['ikepolicy'] = _ikepolicy_get(
            request, ipsecsiteconnection['ikepolicy_id'])
    if expand_ipsecpolicies:
        ipsecsiteconnection['ipsecpolicy'] = _ipsecpolicy_get(
            request, ipsecsiteconnection['ipsecpolicy_id'])
    if expand_vpnservices:
        ipsecsiteconnection['vpnservice'] = _vpnservice_get(
            request, ipsecsiteconnection['vpnservice_id'])
    return IPSecSiteConnection(ipsecsiteconnection)
def ipsecsiteconnection_update(request, ipsecsiteconnection_id, **kwargs):
    """Update an IPSec site connection; kwargs form the update body."""
    ipsecsiteconnection = neutronclient(request).update_ipsec_site_connection(
        ipsecsiteconnection_id, kwargs).get('ipsec_site_connection')
    return IPSecSiteConnection(ipsecsiteconnection)
def ipsecsiteconnection_delete(request, ipsecsiteconnection_id):
    """Delete the IPSec site connection with the given id."""
    neutronclient(request).delete_ipsec_site_connection(ipsecsiteconnection_id)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import *
from marvin.lib.common import list_hosts
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from OpenSSL.crypto import FILETYPE_PEM, verify, X509
# load_publickey is only available in newer pyOpenSSL releases; when it
# is missing, tests skip the CA-signature verification step.
PUBKEY_VERIFY=True
try:
    from OpenSSL.crypto import load_publickey
except ImportError:
    PUBKEY_VERIFY=False
class TestCARootProvider(cloudstackTestCase):
    """Integration tests for CloudStack's built-in 'root' CA provider:
    provider/certificate listing, issuance (with and without a CSR),
    revocation, and provisioning onto system VM hosts.
    """

    @classmethod
    def setUpClass(cls):
        # Standard marvin bootstrap: obtain the API client and parsed
        # test-data config once for the whole class.
        testClient = super(TestCARootProvider, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.cleanup = []

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.apiclient, cls.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def getUpSystemVMHosts(self, hostId=None):
        # Return the system VM hosts (SecondaryStorageVM agents) that are
        # currently Up and Enabled, optionally filtered to one host id.
        hosts = list_hosts(
            self.apiclient,
            type='SecondaryStorageVM',
            state='Up',
            resourcestate='Enabled',
            id=hostId
        )
        return hosts

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_list_ca_providers(self):
        """
        Tests default ca providers list
        """
        cmd = listCAProviders.listCAProvidersCmd()
        response = self.apiclient.listCAProviders(cmd)
        self.assertEqual(len(response), 1)
        self.assertEqual(response[0].name, 'root')

    def getCaCertificate(self):
        # Helper: fetch the PEM text of the root provider's CA certificate.
        cmd = listCaCertificate.listCaCertificateCmd()
        cmd.provider = 'root'
        response = self.apiclient.listCaCertificate(cmd)
        return response.cacertificates.certificate

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_list_ca_certificate(self):
        """
        Tests the ca certificate
        """
        certificate = self.getCaCertificate()
        self.assertTrue(len(certificate) > 0)
        # NOTE(review): str(certificate) suggests this was written for
        # Python 2; on Python 3 load_pem_x509_certificate expects bytes.
        cert = x509.load_pem_x509_certificate(str(certificate), default_backend())
        self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
        self.assertEqual(cert.issuer.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'ca.cloudstack.apache.org')

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_issue_certificate_without_csr(self):
        """
        Tests issuance of a certificate
        """
        cmd = issueCertificate.issueCertificateCmd()
        cmd.domain = 'apache.org,cloudstack.apache.org'
        cmd.ipaddress = '10.1.1.1,10.2.2.2'
        cmd.provider = 'root'
        response = self.apiclient.issueCertificate(cmd)
        # Without a CSR the server generates the key pair, so a private
        # key must come back with the certificate chain.
        self.assertTrue(len(response.privatekey) > 0)
        self.assertTrue(len(response.cacertificates) > 0)
        self.assertTrue(len(response.certificate) > 0)
        cert = x509.load_pem_x509_certificate(str(response.certificate), default_backend())
        # Validate basic certificate attributes
        self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
        self.assertEqual(cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'apache.org')
        # Validate alternative names
        altNames = cert.extensions.get_extension_for_oid(x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
        for domain in cmd.domain.split(','):
            self.assertTrue(domain in altNames.value.get_values_for_type(x509.DNSName))
        for address in cmd.ipaddress.split(','):
            self.assertTrue(address in map(lambda x: str(x), altNames.value.get_values_for_type(x509.IPAddress)))
        # Validate certificate against CA public key
        global PUBKEY_VERIFY
        if not PUBKEY_VERIFY:
            return
        caCert = x509.load_pem_x509_certificate(str(self.getCaCertificate()), default_backend())
        # Wrap the CA public key in a bare X509 so OpenSSL.crypto.verify
        # can check the issued certificate's signature against it.
        x = X509()
        x.set_pubkey(load_publickey(FILETYPE_PEM, str(caCert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo))))
        verify(x, cert.signature, cert.tbs_certificate_bytes, cert.signature_hash_algorithm.name)

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_issue_certificate_with_csr(self):
        """
        Tests issuance of a certificate
        """
        cmd = issueCertificate.issueCertificateCmd()
        cmd.csr = "-----BEGIN CERTIFICATE REQUEST-----\nMIIBHjCByQIBADBkMQswCQYDVQQGEwJJTjELMAkGA1UECAwCSFIxETAPBgNVBAcM\nCEd1cnVncmFtMQ8wDQYDVQQKDAZBcGFjaGUxEzARBgNVBAsMCkNsb3VkU3RhY2sx\nDzANBgNVBAMMBnYtMS1WTTBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQD46KFWKYrJ\nF43Y1oqWUfrl4mj4Qm05Bgsi6nuigZv7ufiAKK0nO4iJKdRa2hFMUvBi2/bU3IyY\nNvg7cdJsn4K9AgMBAAGgADANBgkqhkiG9w0BAQUFAANBAIta9glu/ZSjA/ncyXix\nyDOyAKmXXxsRIsdrEuIzakUuJS7C8IG0FjUbDyIaiwWQa5x+Lt4oMqCmpNqRzaGP\nfOo=\n-----END CERTIFICATE REQUEST-----"
        cmd.provider = 'root'
        response = self.apiclient.issueCertificate(cmd)
        # With a caller-supplied CSR the server never sees the private
        # key, so none may be returned.
        self.assertTrue(response.privatekey is None)
        self.assertTrue(len(response.cacertificates) > 0)
        self.assertTrue(len(response.certificate) > 0)
        cert = x509.load_pem_x509_certificate(str(response.certificate), default_backend())
        # Validate basic certificate attributes
        self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
        self.assertEqual(cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'v-1-VM')
        # Validate certificate against CA public key
        global PUBKEY_VERIFY
        if not PUBKEY_VERIFY:
            return
        caCert = x509.load_pem_x509_certificate(str(self.getCaCertificate()), default_backend())
        x = X509()
        x.set_pubkey(load_publickey(FILETYPE_PEM, str(caCert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo))))
        verify(x, cert.signature, cert.tbs_certificate_bytes, cert.signature_hash_algorithm.name)

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_revoke_certificate(self):
        """
        Tests certificate revocation
        """
        cmd = revokeCertificate.revokeCertificateCmd()
        cmd.serial = 'abc123' # hex value
        cmd.cn = 'example.com'
        cmd.provider = 'root'
        # Clear any leftover row so the insert below is observable.
        self.dbclient.execute("delete from crl where serial='%s'" % cmd.serial)
        response = self.apiclient.revokeCertificate(cmd)
        self.assertTrue(response.success)
        # Revocation must have recorded the serial/CN in the crl table.
        crl = self.dbclient.execute("select serial, cn from crl where serial='%s'" % cmd.serial)[0]
        self.assertEqual(crl[0], cmd.serial)
        self.assertEqual(crl[1], cmd.cn)

    @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
    def test_provision_certificate(self):
        """
        Tests certificate provisioning
        """
        hosts = self.getUpSystemVMHosts()
        if not hosts or len(hosts) < 1:
            raise self.skipTest("No Up systemvm hosts found, skipping test")
        host = hosts[0]
        cmd = provisionCertificate.provisionCertificateCmd()
        cmd.hostid = host.id
        cmd.reconnect = True
        cmd.provider = 'root'
        response = self.apiclient.provisionCertificate(cmd)
        self.assertTrue(response.success)
        if self.hypervisor.lower() == 'simulator':
            # On the simulator the reconnect leaves the host out of the
            # Up state, so it should no longer be listed.
            hosts = self.getUpSystemVMHosts(host.id)
            self.assertTrue(hosts is None or len(hosts) == 0)
        else:
            def checkHostIsUp(hostId):
                hosts = self.getUpSystemVMHosts(host.id)
                return (hosts is not None), hosts
            # NOTE(review): wait_until is not visibly imported in this
            # file — presumably provided by `from marvin.lib.base import *`;
            # confirm, otherwise this branch raises NameError.
            result, hosts = wait_until(1, 30, checkHostIsUp, host.id)
            if result:
                self.assertTrue(len(hosts) == 1)
            else:
                self.fail("Failed to have systemvm host in Up state after cert provisioning")
| |
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is an implementation of the Port interface that overrides other
ports and changes the Driver binary to "MockDRT".
The MockDRT objects emulate what a real DRT would do. In particular, they
return the output a real DRT would return for a given test, assuming that
test actually passes (except for reftests, which currently cause the
MockDRT to crash).
"""
import base64
import logging
import optparse
import os
import sys
import types
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.factory import PortFactory
_log = logging.getLogger(__name__)
class MockDRTPort(object):
    """A Port that wraps a real port but swaps its driver for MockDRT.

    Attribute lookups that are not overridden here fall through to the
    wrapped ("delegate") port; only driver creation and a handful of
    helper/server hooks are stubbed out so that no real DRT binary or
    test servers are required.
    """
    port_name = 'mock'

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        # The mock port never rewrites the requested port name.
        return port_name

    def __init__(self, host, port_name, **kwargs):
        real_name = port_name.replace('mock-', '')
        self.__delegate = PortFactory(host).get(real_name, **kwargs)
        # Remember the delegate's real driver class, then patch the
        # delegate so it hands out our mocked driver factory instead.
        self.__delegate_driver_class = self.__delegate._driver_class
        self.__delegate._driver_class = types.MethodType(self._driver_class, self.__delegate)

    def __getattr__(self, name):
        # Everything we don't override is served by the wrapped port.
        return getattr(self.__delegate, name)

    def check_build(self, needs_http, printer):
        # Nothing needs building for a mocked DRT.
        return True

    def check_sys_deps(self, needs_http):
        return True

    def _driver_class(self, delegate):
        return self._mocked_driver_maker

    def _mocked_driver_maker(self, port, worker_number, pixel_tests, no_timeout=False):
        # __file__ may point at the .pyc; map it back to the source file.
        this_file = self.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
        driver = self.__delegate_driver_class()(self, worker_number, pixel_tests, no_timeout)
        driver.cmd_line = self._overriding_cmd_line(driver.cmd_line,
                                                    self.__delegate._path_to_driver(),
                                                    sys.executable,
                                                    this_file,
                                                    self.__delegate.name())
        return driver

    @staticmethod
    def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
        # Build a cmd_line function that swaps the DRT binary for an
        # invocation of this script under the Python interpreter.
        def wrapped_cmd_line(pixel_tests, per_test_args):
            cmd = original_cmd_line(pixel_tests, per_test_args)
            at = cmd.index(driver_path)
            cmd[at:at + 1] = [python_exe, this_file, '--platform', port_name]
            return cmd
        return wrapped_cmd_line

    def start_helper(self):
        pass

    def start_http_server(self, additional_dirs, number_of_servers):
        pass

    def start_websocket_server(self):
        pass

    def acquire_http_lock(self):
        pass

    def stop_helper(self):
        pass

    def stop_http_server(self):
        pass

    def stop_websocket_server(self):
        pass

    def release_http_lock(self):
        pass

    def _make_wdiff_available(self):
        self.__delegate._wdiff_available = True

    def setup_environ_for_server(self, server_name):
        env = self.__delegate.setup_environ_for_server()
        # Propagate PATH so the spawned python can find the checkout.
        env['PATH'] = os.environ['PATH']
        return env

    def lookup_virtual_test_args(self, test_name):
        suite = self.__delegate.lookup_virtual_suite(test_name)
        return suite.args + ['--virtual-test-suite-name', suite.name,
                            '--virtual-test-suite-base', suite.base]
def main(argv, host, stdin, stdout, stderr):
    """Parse DRT-style arguments and run the mock DRT loop; returns exit code."""
    options, args = parse_options(argv)
    mock_drt = MockDRT(options, args, host, stdin, stdout, stderr)
    return mock_drt.run()
def parse_options(argv):
    """Extract the handful of flags MockDRT cares about from argv.

    We do custom argument parsing instead of using optparse's parser
    because we must silently ignore every other flag a real DRT accepts,
    while optparse would complain about unrecognized flags. The full argv
    is returned unchanged alongside the parsed values.
    """
    def value_after(flag):
        # Return the token following `flag`, or None if the flag is absent.
        try:
            return argv[argv.index(flag) + 1]
        except ValueError:
            return None

    parsed = optparse.Values({
        'actual_directory': value_after('--actual-directory'),
        'platform': value_after('--platform'),
        'virtual_test_suite_base': value_after('--virtual-test-suite-base'),
        'virtual_test_suite_name': value_after('--virtual-test-suite-name'),
    })
    return (parsed, argv)
class MockDRT(object):
    """Emulates a DumpRenderTree process: reads one test per line from
    stdin and writes the output a passing run of that test would produce
    to stdout/stderr, in DRT's wire format.
    """
    def __init__(self, options, args, host, stdin, stdout, stderr):
        self._options = options
        self._args = args
        self._host = host
        self._stdout = stdout
        self._stdin = stdin
        self._stderr = stderr
        # A None port_name lets PortFactory pick the default port.
        port_name = None
        if options.platform:
            port_name = options.platform
        self._port = PortFactory(host).get(port_name=port_name, options=options)
        self._driver = self._port.create_driver(0)
    def run(self):
        """Main loop: process one stdin line per test until EOF; returns 0."""
        while True:
            line = self._stdin.readline()
            if not line:
                return 0
            driver_input = self.input_from_line(line)
            dirname, basename = self._port.split_test(driver_input.test_name)
            is_reftest = (self._port.reference_files(driver_input.test_name) or
                          self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
            output = self.output_for_test(driver_input, is_reftest)
            self.write_test_output(driver_input, output, is_reftest)
    def input_from_line(self, line):
        """Parse one DRT command line of the form <uri>['--pixel-test['<checksum>]]
        (fields separated by apostrophes) into a DriverInput."""
        vals = line.strip().split("'")
        uri = vals[0]
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            # Any other shape of command line is unsupported.
            raise NotImplementedError
        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)
        return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
    def output_for_test(self, test_input, is_reftest):
        """Build the DriverOutput a passing run of this test would produce.

        Reftests get fabricated output; other tests use the port's expected
        results, optionally overridden by -actual.* files found under
        --actual-directory.
        """
        port = self._port
        if self._options.virtual_test_suite_name:
            test_input.test_name = test_input.test_name.replace(self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
        actual_text = port.expected_text(test_input.test_name)
        actual_audio = port.expected_audio(test_input.test_name)
        actual_image = None
        actual_checksum = None
        if is_reftest:
            # Make up some output for reftests.
            actual_text = 'reference text\n'
            actual_checksum = 'mock-checksum'
            actual_image = 'blank'
            if test_input.test_name.endswith('-mismatch.html'):
                # Mismatch reftests must produce output different from the reference.
                actual_text = 'not reference text\n'
                actual_checksum = 'not-mock-checksum'
                actual_image = 'not blank'
        elif test_input.should_run_pixel_test and test_input.image_hash:
            actual_checksum = port.expected_checksum(test_input.test_name)
            actual_image = port.expected_image(test_input.test_name)
        if self._options.actual_directory:
            # Prefer any on-disk -actual.{txt,wav,png} files over the
            # port's baked-in expectations.
            actual_path = port._filesystem.join(self._options.actual_directory, test_input.test_name)
            root, _ = port._filesystem.splitext(actual_path)
            text_path = root + '-actual.txt'
            if port._filesystem.exists(text_path):
                actual_text = port._filesystem.read_binary_file(text_path)
            audio_path = root + '-actual.wav'
            if port._filesystem.exists(audio_path):
                actual_audio = port._filesystem.read_binary_file(audio_path)
            image_path = root + '-actual.png'
            if port._filesystem.exists(image_path):
                actual_image = port._filesystem.read_binary_file(image_path)
                with port._filesystem.open_binary_file_for_reading(image_path) as filehandle:
                    actual_checksum = read_checksum_from_png.read_checksum(filehandle)
        return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
    def write_test_output(self, test_input, output, is_reftest):
        """Emit `output` on stdout/stderr in DRT's wire format (write order matters)."""
        if output.audio:
            self._stdout.write('Content-Type: audio/wav\n')
            self._stdout.write('Content-Transfer-Encoding: base64\n')
            self._stdout.write(base64.b64encode(output.audio))
            self._stdout.write('\n')
        else:
            self._stdout.write('Content-Type: text/plain\n')
            # FIXME: Note that we don't ensure there is a trailing newline!
            # This mirrors actual (Mac) DRT behavior but is a bug.
            if output.text:
                self._stdout.write(output.text)
        self._stdout.write('#EOF\n')
        if test_input.should_run_pixel_test and output.image_hash:
            self._stdout.write('\n')
            self._stdout.write('ActualHash: %s\n' % output.image_hash)
            self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
            if output.image_hash != test_input.image_hash:
                # Only ship the PNG payload when the hashes differ.
                self._stdout.write('Content-Type: image/png\n')
                self._stdout.write('Content-Length: %s\n' % len(output.image))
                self._stdout.write(output.image)
        self._stdout.write('#EOF\n')
        self._stdout.flush()
        self._stderr.write('#EOF\n')
        self._stderr.flush()
if __name__ == '__main__':
    # The "Mock" in MockDRT means we emulate a real DRT; that emulation
    # still needs access to a real SystemHost, not a MockSystemHost.
    exit_code = main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr)
    sys.exit(exit_code)
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# Resolved URLs for the volumes dashboard index page and its "Volumes" tab,
# shared by the tests below as POST targets and expected redirects.
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
class VolumeViewTests(test.TestCase):
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume(self):
        """Creating a volume with an explicit type and availability zone
        succeeds and redirects to the volumes tab."""
        volume = self.cinder_volumes.first()
        volume_type = self.volume_types.first()
        az = self.cinder_availability_zones.first().zoneName
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'type': volume_type.name,
                    'size': 50,
                    'snapshot_source': '',
                    'availability_zone': az}
        # Mox expectations are recorded in the order the view calls them.
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             formData['type'],
                             metadata={},
                             snapshot_id=None,
                             image_id=None,
                             availability_zone=formData['availability_zone'],
                             source_volid=None)\
            .AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_dropdown(self):
        """With source type 'no_source_type' the snapshot/image fields in
        the form are ignored and an empty volume is created."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 50,
                    'type': '',
                    'volume_source_type': 'no_source_type',
                    'snapshot_source': self.cinder_volume_snapshots.first().id,
                    'image_source': self.images.first().id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # No snapshot/image/source volume id must reach the API call.
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=None,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_snapshot_get',
                                 'volume_get',
                                 'volume_type_list'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_snapshot(self):
        """A snapshot id passed via the URL query string is used as the
        volume's source snapshot."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        snapshot = self.cinder_volume_snapshots.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 50,
                    'type': '',
                    'snapshot_source': snapshot.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_get(IsA(http.HttpRequest),
                                   str(snapshot.id)).AndReturn(snapshot)
        cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
            AndReturn(self.cinder_volumes.first())
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=snapshot.id,
                             image_id=None,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get snapshot from url
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "snapshot_id=" + str(snapshot.id)]),
                               formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_get',
                                 'volume_list',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'volume_snapshot_get',
                                 'volume_snapshot_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_volume(self):
        """Cloning an existing volume passes its id as source_volid and
        posts an info message before redirecting."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        formData = {'name': u'A copy of a volume',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 50,
                    'type': '',
                    'volume_source_type': 'volume_source',
                    'volume_source': volume.id}
        cinder.volume_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volumes.list())
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_get(IsA(http.HttpRequest),
                          volume.id).AndReturn(self.cinder_volumes.first())
        cinder.extension_supported(IsA(http.HttpRequest),
                                   'AvailabilityZones').AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=None,
                             availability_zone=None,
                             source_volid=volume.id).AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        redirect_url = VOLUME_VOLUMES_TAB_URL
        res = self.client.post(url, formData)
        self.assertNoFormErrors(res)
        self.assertMessageCount(info=1)
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_snapshot_list',
                                 'volume_snapshot_get',
                                 'volume_get',
                                 'volume_list',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_snapshot_dropdown(self):
        """Selecting a snapshot from the form's dropdown (rather than via
        the URL) still results in a snapshot-sourced volume."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 250,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        snapshot = self.cinder_volume_snapshots.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 50,
                    'type': '',
                    'volume_source_type': 'snapshot_source',
                    'snapshot_source': snapshot.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_get(IsA(http.HttpRequest),
                                   str(snapshot.id)).AndReturn(snapshot)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=snapshot.id,
                             image_id=None,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get snapshot from dropdown list
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_snapshot_get',
                                 'volume_type_list',
                                 'volume_get'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_snapshot_invalid_size(self):
        """Requesting a volume smaller than its source snapshot is
        rejected with a form error and no redirect."""
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        snapshot = self.cinder_volume_snapshots.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 20, 'snapshot_source': snapshot.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_get(IsA(http.HttpRequest),
                                   str(snapshot.id)).AndReturn(snapshot)
        cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
            AndReturn(self.cinder_volumes.first())
        # The quota is queried a second time when the POST is re-validated.
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "snapshot_id=" + str(snapshot.id)]),
                               formData, follow=True)
        self.assertEqual(res.redirect_chain, [])
        self.assertFormError(res, 'form', None,
                             "The volume size cannot be less than the "
                             "snapshot size (40GB)")
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image(self):
        """An image id passed via the URL query string is used as the
        volume's source image."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 200,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 40,
                    'type': '',
                    'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=image.id,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get image from url
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_create',
                                 'volume_type_list',
                                 'volume_list',
                                 'volume_snapshot_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image_dropdown(self):
        """Selecting an image from the form's dropdown overrides the
        snapshot field and produces an image-sourced volume."""
        volume = self.cinder_volumes.first()
        usage_limit = {'maxTotalVolumeGigabytes': 200,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 30,
                    'type': '',
                    'volume_source_type': 'image_source',
                    'snapshot_source': self.cinder_volume_snapshots.first().id,
                    'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
            .AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        cinder.volume_create(IsA(http.HttpRequest),
                             formData['size'],
                             formData['name'],
                             formData['description'],
                             '',
                             metadata={},
                             snapshot_id=None,
                             image_id=image.id,
                             availability_zone=None,
                             source_volid=None).AndReturn(volume)
        self.mox.ReplayAll()
        # get image from dropdown list
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        redirect_url = VOLUME_VOLUMES_TAB_URL
        self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({cinder: ('volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image_under_image_size(self):
        """Requesting a volume smaller than its source image is rejected
        with a form error and no redirect."""
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 1, 'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The quota is queried a second time when the POST is re-validated.
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData, follow=True)
        self.assertEqual(res.redirect_chain, [])
        # in django 1.6 filesizeformat replaces all spaces with
        # non-breaking space characters
        if django.VERSION >= (1, 6):
            msg = (u"The volume size cannot be less than the "
                   u"image size (20.0\xa0GB)")
        else:
            msg = (u"The volume size cannot be less than the "
                   u"image size (20.0 GB)")
        self.assertFormError(res, 'form', None, msg)
    @test.create_stubs({cinder: ('volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_get',
                                     'image_list_detailed'),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_from_image_under_image_min_disk_size(self):
        """Requesting a volume smaller than the image's min_disk attribute
        is rejected with a form error and no redirect."""
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        image = self.images.get(name="protected_images")
        image.min_disk = 30
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 5, 'image_source': image.id}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        api.glance.image_get(IsA(http.HttpRequest),
                             str(image.id)).AndReturn(image)
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The quota is queried a second time when the POST is re-validated.
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post("?".join([url,
                                         "image_id=" + str(image.id)]),
                               formData, follow=True)
        self.assertEqual(res.redirect_chain, [])
        self.assertFormError(res, 'form', None,
                             "The volume size cannot be less than the "
                             "image minimum disk size (30GB)")
    @test.create_stubs({cinder: ('volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_gb_used_over_alloted_quota(self):
        """A create request exceeding the remaining gigabyte quota fails
        with a non-field ('__all__') form error."""
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 80,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': 6}
        formData = {'name': u'This Volume Is Huge!',
                    'description': u'This is a volume that is just too big!',
                    'method': u'CreateForm',
                    'size': 5000}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The quota is queried a second time when the POST is re-validated.
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        expected_error = [u'A volume of 5000GB cannot be created as you only'
                          ' have 20GB of your quota available.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({cinder: ('volume_snapshot_list',
                                 'volume_type_list',
                                 'volume_list',
                                 'availability_zone_list',
                                 'extension_supported'),
                        api.glance: ('image_list_detailed',),
                        quotas: ('tenant_limit_usages',)})
    def test_create_volume_number_over_alloted_quota(self):
        """A create request when the volume-count quota is exhausted fails
        with a non-field ('__all__') form error."""
        usage_limit = {'maxTotalVolumeGigabytes': 100,
                       'gigabytesUsed': 20,
                       'volumesUsed': len(self.cinder_volumes.list()),
                       'maxTotalVolumes': len(self.cinder_volumes.list())}
        formData = {'name': u'Too Many...',
                    'description': u'We have no volumes left!',
                    'method': u'CreateForm',
                    'size': 10}
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_snapshots.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        cinder.volume_list(IsA(
            http.HttpRequest)).AndReturn(self.cinder_volumes.list())
        cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
            .AndReturn(True)
        cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_availability_zones.list())
        # The quota is queried a second time when the POST is re-validated.
        quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
            AndReturn(usage_limit)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:volumes:create')
        res = self.client.post(url, formData)
        expected_error = [u'You are already using all of your available'
                          ' volumes.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_list',
                                 'volume_backup_supported',
                                 'volume_delete',),
                        api.nova: ('server_list',)})
    def test_delete_volume(self):
        """Deleting a volume from the table posts the delete action and
        shows a 'Scheduled deletion' message."""
        volumes = self.cinder_volumes.list()
        volume = self.cinder_volumes.first()
        formData = {'action':
                    'volumes__delete__%s' % volume.id}
        cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        cinder.volume_delete(IsA(http.HttpRequest), volume.id)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        # The table is re-rendered after the delete, hence a second
        # volume_list/server_list round trip.
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
            AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        url = VOLUME_INDEX_URL
        res = self.client.post(url, formData, follow=True)
        self.assertIn("Scheduled deletion of Volume: Volume name",
                      [m.message for m in res.context['messages']])
    @test.create_stubs({cinder: ('tenant_absolute_limits',
                                 'volume_list',
                                 'volume_backup_supported',
                                 'volume_delete',),
                        api.nova: ('server_list',)})
    def test_delete_volume_error_existing_snapshot(self):
        """A cinder 400 error on delete (dependent snapshots) surfaces as
        a user-facing 'Unable to delete volume' message."""
        volume = self.cinder_volumes.first()
        volumes = self.cinder_volumes.list()
        formData = {'action':
                    'volumes__delete__%s' % volume.id}
        # Build a BadRequest-style exception of the same class the test
        # fixtures use for cinder errors.
        exc = self.exceptions.cinder.__class__(400,
                                               "error: dependent snapshots")
        cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        cinder.volume_delete(IsA(http.HttpRequest), volume.id).\
            AndRaise(exc)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        # The table is re-rendered after the failed delete.
        cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
            AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        url = VOLUME_INDEX_URL
        res = self.client.post(url, formData, follow=True)
        self.assertEqual(list(res.context['messages'])[0].message,
                         u'Unable to delete volume "%s". '
                         u'One or more snapshots depend on it.' %
                         volume.name)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
                                                  True})
def test_edit_attachments(self):
    """Attach form shows the attachment and an optional device text input."""
    volume = self.cinder_volumes.first()
    servers = [s for s in self.servers.list()
               if s.tenant_id == self.request.user.tenant_id]
    volume.attachments = [{'id': volume.id,
                           'volume_id': volume.id,
                           'volume_name': volume.name,
                           'instance': servers[0],
                           'device': '/dev/vdb',
                           'server_id': servers[0].id}]
    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:attach',
                  args=[volume.id])
    res = self.client.get(url)
    msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
    self.assertContains(res, msg)
    # Asserting length of 2 accounts for the one instance option,
    # and the one 'Choose Instance' option.
    # NOTE(review): the comment above says 2 but the assertion expects 1;
    # presumably the already-attached instance is filtered out of the
    # choices -- confirm against the form implementation.
    form = res.context['form']
    self.assertEqual(len(form.fields['instance']._choices),
                     1)
    self.assertEqual(res.status_code, 200)
    # With can_set_mount_point enabled the device field stays visible
    # as a (non-required) text input.
    self.assertTrue(isinstance(form.fields['device'].widget,
                               widgets.TextInput))
    self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
                                                  True})
def test_edit_attachments_auto_device_name(self):
    """An empty attachment device name still renders an optional text input."""
    volume = self.cinder_volumes.first()
    servers = [s for s in self.servers.list()
               if s.tenant_id == self.request.user.tenant_id]
    # 'device' is deliberately empty: the device name was auto-assigned.
    volume.attachments = [{'id': volume.id,
                           'volume_id': volume.id,
                           'volume_name': volume.name,
                           'instance': servers[0],
                           'device': '',
                           'server_id': servers[0].id}]
    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:attach',
                  args=[volume.id])
    res = self.client.get(url)
    form = res.context['form']
    self.assertTrue(isinstance(form.fields['device'].widget,
                               widgets.TextInput))
    self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
    """Without the can_set_mount_point feature the device field is hidden."""
    volume = self.cinder_volumes.first()
    servers = [s for s in self.servers.list()
               if s.tenant_id == self.request.user.tenant_id]
    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:attach',
                  args=[volume.id])
    res = self.client.get(url)
    # Assert the device field is hidden.
    form = res.context['form']
    self.assertTrue(isinstance(form.fields['device'].widget,
                               widgets.HiddenInput))
@test.create_stubs({cinder: ('volume_get',),
                    api.nova: ('server_list',)})
def test_edit_attachments_attached_volume(self):
    """Attach form for an attached volume lists the placeholder plus server."""
    servers = [s for s in self.servers.list()
               if s.tenant_id == self.request.user.tenant_id]
    server = servers[0]
    volume = self.cinder_volumes.list()[0]

    cinder.volume_get(IsA(http.HttpRequest), volume.id) \
        .AndReturn(volume)
    api.nova.server_list(IsA(http.HttpRequest)) \
        .AndReturn([servers, False])
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:attach',
                  args=[volume.id])
    res = self.client.get(url)
    # First choice is the "Select an instance" placeholder, second is
    # the tenant's server.
    self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
                     "Select an instance")
    self.assertEqual(len(res.context['form'].fields['instance'].choices),
                     2)
    self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
                     server.id)
    self.assertEqual(res.status_code, 200)
@test.create_stubs({cinder: ('tenant_absolute_limits',
                             'volume_get',)})
def test_create_snapshot_button_disabled_when_quota_exceeded(self):
    """Row update renders the snapshot link disabled at snapshot quota."""
    # Snapshot usage equals the maximum, i.e. the quota is exhausted.
    limits = {'maxTotalSnapshots': 1}
    limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
    volume = self.cinder_volumes.first()
    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.tenant_absolute_limits(IsA(http.HttpRequest)).AndReturn(limits)
    self.mox.ReplayAll()

    create_link = tables.CreateSnapshot()
    url = reverse(create_link.get_link_url(), args=[volume.id])
    res_url = VOLUME_INDEX_URL + \
        "?action=row_update&table=volumes&obj_id=" + volume.id
    res = self.client.get(res_url, {},
                          HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    classes = list(create_link.get_default_classes())\
        + list(create_link.classes)
    link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                             "Quota exceeded")
    expected_string = "<a href='%s' class=\"%s disabled\" "\
        "id=\"volumes__row_%s__action_snapshots\">%s</a>" \
        % (url, " ".join(classes), volume.id, link_name)
    self.assertContains(res, expected_string, html=True,
                        msg_prefix="The create snapshot button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
                             'volume_list',
                             'volume_backup_supported',),
                    api.nova: ('server_list',)})
def test_create_button_disabled_when_quota_exceeded(self):
    """Index page renders the Create Volume button disabled at volume quota.

    Drives totalVolumesUsed up to maxTotalVolumes, loads the index and
    asserts the create link carries the ``disabled`` class and a
    "(Quota exceeded)" suffix.
    """
    limits = self.cinder_limits['absolute']
    limits['totalVolumesUsed'] = limits['maxTotalVolumes']
    volumes = self.cinder_volumes.list()

    # Use the module-level ``cinder`` alias consistently with the stub
    # dict above and the sibling tests (it refers to the same module as
    # ``api.cinder``).
    cinder.volume_backup_supported(IsA(http.HttpRequest)). \
        MultipleTimes().AndReturn(True)
    cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn(volumes)
    api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn([self.servers.list(), False])
    cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
        .MultipleTimes().AndReturn(limits)
    self.mox.ReplayAll()

    res = self.client.get(VOLUME_INDEX_URL)
    self.assertTemplateUsed(res, 'project/volumes/index.html')

    volumes = res.context['volumes_table'].data
    self.assertItemsEqual(volumes, self.cinder_volumes.list())

    create_link = tables.CreateVolume()
    url = create_link.get_link_url()
    classes = list(create_link.get_default_classes())\
        + list(create_link.classes)
    link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                             "Quota exceeded")
    expected_string = "<a href='%s' title='%s' class='%s disabled' "\
        "id='volumes__action_create' data-update-url=" \
        "'/project/volumes/?action=create&table=volumes'> "\
        "<span class='glyphicon glyphicon-plus'></span>%s</a>" \
        % (url, link_name, " ".join(classes), link_name)
    self.assertContains(res, expected_string, html=True,
                        msg_prefix="The create button is not disabled")
@test.create_stubs({cinder: ('volume_get',),
                    api.nova: ('server_get',)})
def test_detail_view(self):
    """Detail page shows name, id, status, size and the attached instance."""
    volume = self.cinder_volumes.first()
    server = self.servers.first()

    volume.attachments = [{"server_id": server.id}]

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:detail',
                  args=[volume.id])
    res = self.client.get(url)

    self.assertContains(res, "<h2>Volume Details: Volume name</h2>",
                        1, 200)
    self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
    self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
    self.assertContains(res, "<dd>Available</dd>", 1, 200)
    self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
    # The attached server is rendered as a link to its instance page.
    self.assertContains(res,
                        ("<a href=\"/project/instances/1/\">%s</a>"
                         % server.name),
                        1,
                        200)

    self.assertNoMessages()
@test.create_stubs({cinder: ('tenant_absolute_limits',
                             'volume_get',)})
def test_get_data(self):
    """Row update for a volume with an empty name falls back to its id."""
    volume = self.cinder_volumes.get(name='v2_volume')
    # Blank the underlying API resource name to trigger the fallback.
    volume._apiresource.name = ""

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
        .MultipleTimes().AndReturn(self.cinder_limits['absolute'])
    self.mox.ReplayAll()

    url = VOLUME_INDEX_URL + \
        "?action=row_update&table=volumes&obj_id=" + volume.id
    res = self.client.get(url, {},
                          HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    self.assertEqual(res.status_code, 200)
    # With no API name set, the displayed name equals the volume id.
    self.assertEqual(volume.name, volume.id)
@test.create_stubs({cinder: ('volume_get',)})
def test_detail_view_with_exception(self):
    """A cinder error on volume_get redirects back to the index page."""
    volume = self.cinder_volumes.first()
    server = self.servers.first()

    volume.attachments = [{"server_id": server.id}]

    cinder.volume_get(IsA(http.HttpRequest), volume.id).\
        AndRaise(self.exceptions.cinder)
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:detail',
                  args=[volume.id])
    res = self.client.get(url)

    self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
                             'volume_get',)})
def test_update_volume(self):
    """Posting the update form calls volume_update and redirects to index."""
    volume = self.cinder_volumes.get(name="my_volume")

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.volume_update(IsA(http.HttpRequest),
                         volume.id,
                         volume.name,
                         volume.description)

    self.mox.ReplayAll()

    formData = {'method': 'UpdateForm',
                'name': volume.name,
                'description': volume.description}

    url = reverse('horizon:project:volumes:volumes:update',
                  args=[volume.id])
    res = self.client.post(url, formData)
    self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_upload_to_image',
                             'volume_get')})
def test_upload_to_image(self):
    """Upload-to-image form forwards its fields to volume_upload_to_image."""
    volume = self.cinder_volumes.get(name='v2_volume')
    # Canned API response for a successful upload request.
    loaded_resp = {'container_format': 'bare',
                   'disk_format': 'raw',
                   'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
                   'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
                   'image_name': 'test',
                   'size': '2',
                   'status': 'uploading'}

    form_data = {'id': volume.id,
                 'name': volume.name,
                 'image_name': 'testimage',
                 'force': True,
                 'container_format': 'bare',
                 'disk_format': 'raw'}

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.volume_upload_to_image(
        IsA(http.HttpRequest),
        form_data['id'],
        form_data['force'],
        form_data['image_name'],
        form_data['container_format'],
        form_data['disk_format']).AndReturn(loaded_resp)
    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:upload_to_image',
                  args=[volume.id])
    res = self.client.post(url, form_data)

    self.assertNoFormErrors(res)
    self.assertMessageCount(info=1)

    redirect_url = VOLUME_INDEX_URL
    self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
                             'volume_extend')})
def test_extend_volume(self):
    """Extending to a larger size calls volume_extend and redirects."""
    volume = self.cinder_volumes.first()
    formData = {'name': u'A Volume I Am Making',
                'orig_size': volume.size,
                'new_size': 100}

    cinder.volume_get(IsA(http.HttpRequest), volume.id).\
        AndReturn(self.cinder_volumes.first())
    cinder.volume_extend(IsA(http.HttpRequest),
                         volume.id,
                         formData['new_size']).AndReturn(volume)

    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:extend',
                  args=[volume.id])
    res = self.client.post(url, formData)

    redirect_url = VOLUME_INDEX_URL
    self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',),
                    quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_wrong_size(self):
    """A new size smaller than the current size produces a form error."""
    volume = self.cinder_volumes.first()
    usage_limit = {'maxTotalVolumeGigabytes': 100,
                   'gigabytesUsed': 20,
                   'volumesUsed': len(self.cinder_volumes.list()),
                   'maxTotalVolumes': 6}
    # new_size (10) is below the volume's original size.
    formData = {'name': u'A Volume I Am Making',
                'orig_size': volume.size,
                'new_size': 10}

    cinder.volume_get(IsA(http.HttpRequest), volume.id).\
        AndReturn(self.cinder_volumes.first())
    quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
        AndReturn(usage_limit)

    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:extend',
                  args=[volume.id])
    res = self.client.post(url, formData)
    self.assertFormError(res, 'form', None,
                         "New size must be greater than "
                         "current size.")
@test.create_stubs({cinder: ('volume_get',
                             'retype_supported'),
                    api.nova: ('server_get',)})
def test_retype_volume_not_supported_no_action_item(self):
    """When retype is unsupported, the row omits the retype action."""
    volume = self.cinder_volumes.get(name='my_volume')
    server = self.servers.first()

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.retype_supported().AndReturn(False)
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)

    self.mox.ReplayAll()

    url = (VOLUME_INDEX_URL +
           "?action=row_update&table=volumes&obj_id=" + volume.id)

    res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    self.assertEqual(res.status_code, 200)
    self.assertNotContains(res, 'Change Volume Type')
    self.assertNotContains(res, 'retype')
@test.create_stubs({cinder: ('volume_get',
                             'retype_supported')})
def test_retype_volume_supported_action_item(self):
    """When retype is supported, the row includes the retype action."""
    volume = self.cinder_volumes.get(name='v2_volume')

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.retype_supported().AndReturn(True)

    self.mox.ReplayAll()

    url = (VOLUME_INDEX_URL +
           "?action=row_update&table=volumes&obj_id=" + volume.id)

    res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    self.assertEqual(res.status_code, 200)
    self.assertContains(res, 'Change Volume Type')
    self.assertContains(res, 'retype')
@test.create_stubs({cinder: ('volume_get',
                             'volume_retype',
                             'volume_type_list')})
def test_retype_volume(self):
    """Posting the retype form calls volume_retype and redirects to index."""
    volume = self.cinder_volumes.get(name='my_volume2')

    volume_type = self.cinder_volume_types.get(name='vol_type_1')

    form_data = {'id': volume.id,
                 'name': volume.name,
                 'volume_type': volume_type.name,
                 'migration_policy': 'on-demand'}

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.volume_type_list(
        IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
    cinder.volume_retype(
        IsA(http.HttpRequest),
        volume.id,
        form_data['volume_type'],
        form_data['migration_policy']).AndReturn(True)

    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:retype',
                  args=[volume.id])
    res = self.client.post(url, form_data)

    self.assertNoFormErrors(res)

    redirect_url = VOLUME_INDEX_URL
    self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
                             'volume_type_list')})
def test_retype_volume_same_type(self):
    """Retyping to the volume's current type produces a form error."""
    volume = self.cinder_volumes.get(name='my_volume2')

    # vol_type_2 is the type the volume already has; no retype call is
    # recorded because validation fails before the API is reached.
    volume_type = self.cinder_volume_types.get(name='vol_type_2')

    form_data = {'id': volume.id,
                 'name': volume.name,
                 'volume_type': volume_type.name,
                 'migration_policy': 'on-demand'}

    cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
    cinder.volume_type_list(
        IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())

    self.mox.ReplayAll()

    url = reverse('horizon:project:volumes:volumes:retype',
                  args=[volume.id])
    res = self.client.post(url, form_data)

    self.assertFormError(res,
                         'form',
                         'volume_type',
                         'New volume type must be different from the '
                         'original volume type "%s".' % volume_type.name)
def test_encryption_false(self):
    """Encryption column shows 'No' for unencrypted volumes."""
    self._test_encryption(False)
def test_encryption_true(self):
    """Encryption column shows 'Yes' for encrypted volumes."""
    self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
                             'volume_backup_supported',
                             'tenant_absolute_limits'),
                    api.nova: ('server_list',)})
def _test_encryption(self, encryption):
    """Shared body: render index and check every row's encryption cell.

    Sets ``encrypted`` on all volumes to *encryption* and expects the
    table to display 'Yes' or 'No' accordingly.
    """
    volumes = self.volumes.list()
    for volume in volumes:
        volume.encrypted = encryption
    limits = self.cinder_limits['absolute']

    cinder.volume_backup_supported(IsA(http.HttpRequest))\
        .MultipleTimes('backup_supported').AndReturn(False)
    cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn(self.volumes.list())
    api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
        .AndReturn([self.servers.list(), False])
    cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
        .MultipleTimes('limits').AndReturn(limits)
    self.mox.ReplayAll()

    res = self.client.get(VOLUME_INDEX_URL)
    rows = res.context['volumes_table'].get_rows()

    if encryption:
        column_value = 'Yes'
    else:
        column_value = 'No'

    for row in rows:
        self.assertEqual(row.cells['encryption'].data, column_value)
| |
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import inspect
import os
import sys
import zlib
# Python 3 removed ``xrange``; alias it to ``range`` so the rest of the
# module can use ``xrange`` unconditionally on both interpreters.
try:
    xrange  # py2
except NameError:
    xrange = range  # py3
class ObjectDict(dict):
    """Makes a dictionary behave like an object, with attribute-style access.
    """
    def __getattr__(self, name):
        # Attribute reads delegate to item lookup; a missing key turns
        # into the AttributeError callers expect from attribute access.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Attribute writes become item assignments.
        self[name] = value
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
        # Passing 16 + MAX_WBITS makes zlib expect a gzip header and
        # trailer instead of a raw/zlib-wrapped stream.
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        # This works on cpython and pypy, but not jython.
        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def decompress(self, value):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.
        """
        return self.decompressobj.decompress(value)

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.

        Also checks for errors such as truncated input.
        No other methods may be called on this object after `flush`.
        """
        return self.decompressobj.flush()
def import_object(name):
    """Imports an object by name.

    import_object('x') is equivalent to 'import x'.
    import_object('x.y.z') is equivalent to 'from x.y import z'.

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    >>> import_object('tornado') is tornado
    True
    >>> import_object('tornado.missing_module')
    Traceback (most recent call last):
        ...
    ImportError: No module named missing_module
    """
    if '.' not in name:
        # A bare module name: a plain import is enough.
        return __import__(name, None, None)
    # Split off the last component: everything before the final dot is
    # the module to import, the remainder is the attribute to fetch.
    module_name, _, attr = name.rpartition('.')
    module = __import__(module_name, None, None, [attr], 0)
    try:
        return getattr(module, attr)
    except AttributeError:
        raise ImportError("No module named %s" % attr)
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414).  u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if type('') is not type(b''):
    # Python 3: str literals are already unicode; u() is the identity.
    def u(s):
        return s
    bytes_type = bytes
    unicode_type = str
    basestring_type = str
else:
    # Python 2: decode \u escapes so u() behaves like a u'' literal.
    def u(s):
        return s.decode('unicode_escape')
    bytes_type = str
    unicode_type = unicode
    basestring_type = basestring
# Each variant must live inside exec() because the other major version's
# raise/exec syntax would not even compile on this interpreter.
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])

def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]

def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.

    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc.  This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # Configured implementation class and kwargs.  The double-underscore
    # name mangling makes these ``_Configurable__impl_*`` everywhere, so
    # the whole hierarchy shares one slot stored on the base class.
    __impl_class = None
    __impl_kwargs = None

    def __new__(cls, **kwargs):
        base = cls.configurable_base()
        args = {}
        if cls is base:
            # Instantiating the interface itself: dispatch to the
            # configured (or default) implementation and merge in any
            # globally configured kwargs.
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            # Instantiating a concrete subclass directly: no dispatch.
            impl = cls
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic.  If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance

    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()

    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()

    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """

    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor.  This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        if isinstance(impl, (unicode_type, bytes_type)):
            # A string names the implementation; import it lazily.
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs

    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        # NOTE(review): reads via ``cls`` but writes via ``base``; both
        # resolve the same mangled attribute, with the ``cls`` read
        # falling back to the value stored on the base class.
        if cls.__impl_class is None:
            base.__impl_class = cls.configurable_default()
        return base.__impl_class

    @classmethod
    def _save_configuration(cls):
        # Snapshot the configuration (used by tests to restore state).
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)

    @classmethod
    def _restore_configuration(cls, saved):
        # Restore a snapshot produced by _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
class ArgReplacer(object):
    """Replaces one value in an ``args, kwargs`` pair.

    Inspects the function signature to find an argument by name
    whether it is passed by position or keyword.  For use in decorators
    and similar wrappers.
    """
    def __init__(self, func, name):
        self.name = name
        # ``inspect.getargspec`` was removed in Python 3.11; prefer
        # ``getfullargspec`` when available (Python 3) and fall back to
        # ``getargspec`` on Python 2.  Both expose ``.args`` the same way.
        getargspec = getattr(inspect, 'getfullargspec', None) or \
            inspect.getargspec
        try:
            self.arg_pos = getargspec(func).args.index(self.name)
        except ValueError:
            # Not a positional parameter
            self.arg_pos = None

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.

        Returns ``(old_value, args, kwargs)``.  The returned ``args`` and
        ``kwargs`` objects may not be the same as the input objects, or
        the input objects may be mutated.

        If the named argument was not found, ``new_value`` will be added
        to ``kwargs`` and None will be returned as ``old_value``.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            # The arg to replace is passed positionally
            old_value = args[self.arg_pos]
            args = list(args)  # *args is normally a tuple
            args[self.arg_pos] = new_value
        else:
            # The arg to replace is either omitted or passed by keyword.
            old_value = kwargs.get(self.name)
            kwargs[self.name] = new_value
        return old_value, args, kwargs
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
# Select the websocket masking implementation: the C speedup when it is
# importable, otherwise the pure-python fallback above.
if (os.environ.get('TORNADO_NO_EXTENSION') or
        os.environ.get('TORNADO_EXTENSION') == '0'):
    # These environment variables exist to make it easier to do performance
    # comparisons; they are not guaranteed to remain supported in the future.
    _websocket_mask = _websocket_mask_python
else:
    try:
        from tornado.speedups import websocket_mask as _websocket_mask
    except ImportError:
        # TORNADO_EXTENSION=1 makes the C extension mandatory.
        if os.environ.get('TORNADO_EXTENSION') == '1':
            raise
        _websocket_mask = _websocket_mask_python
def doctests():
    """Collect this module's doctests into a unittest suite."""
    import doctest
    return doctest.DocTestSuite()
| |
import json
import pytest
from sqlalchemy_json_api import QueryBuilder
@pytest.mark.usefixtures('table_creator', 'dataset')
class TestSelectRelated(object):
    """Tests for ``QueryBuilder.select_related`` without link generation."""

    @pytest.mark.parametrize(
        ('id', 'result'),
        (
            (
                1,
                {'data': [{'type': 'users', 'id': '2'}]}
            ),
            (
                2,
                {'data': [
                    {'type': 'users', 'id': '1'},
                    {'type': 'users', 'id': '3'},
                    {'type': 'users', 'id': '4'}
                ]}
            )
        )
    )
    def test_to_many_relationship_with_ids_only(
        self,
        query_builder,
        session,
        user_cls,
        id,
        result
    ):
        """A to-many relation with empty fields yields resource ids only."""
        # fields={'users': []} suppresses attributes, so the document
        # contains bare resource identifiers (type/id).
        query = query_builder.select_related(
            session.query(user_cls).get(id),
            'all_friends',
            fields={'users': []}
        )
        assert session.execute(query).scalar() == result

    @pytest.mark.parametrize(
        ('id', 'fields', 'result'),
        (
            (
                2,
                {'categories': ['name']},
                {'data': {
                    'type': 'categories',
                    'id': '1',
                    'attributes': {
                        'name': 'Some category'
                    }
                }}
            ),
            (
                5,
                {'categories': ['parent']},
                {'data': {
                    'type': 'categories',
                    'id': '3',
                    'relationships': {
                        'parent': {
                            'data': {
                                'id': '2',
                                'type': 'categories'
                            }
                        }
                    }
                }}
            )
        )
    )
    def test_to_one_relationship(
        self,
        query_builder,
        session,
        category_cls,
        id,
        fields,
        result
    ):
        """The requested fields decide between attributes and relationships."""
        query = query_builder.select_related(
            session.query(category_cls).get(id),
            'parent',
            fields=fields
        )
        assert session.execute(query).scalar() == result
@pytest.mark.usefixtures('table_creator', 'dataset')
class TestSelectRelationshipWithLinks(object):
    """Tests for ``select_related`` with a ``base_url`` so links are built."""

    @pytest.fixture
    def query_builder(self, model_mapping):
        # Override the shared fixture: a base_url enables 'links' output.
        return QueryBuilder(model_mapping, base_url='/')

    @pytest.mark.parametrize(
        ('id', 'links', 'result'),
        (
            (
                1,
                {'self': '/users/1/all_friends'},
                {
                    'data': [
                        {
                            'type': 'users',
                            'id': '2',
                            'links': {'self': '/users/2'}
                        }
                    ],
                    'links': {'self': '/users/1/all_friends'}
                }
            ),
            (
                2,
                {'self': '/users/2/all_friends'},
                {
                    'data': [
                        {
                            'type': 'users',
                            'id': '1',
                            'links': {'self': '/users/1'}
                        },
                        {
                            'type': 'users',
                            'id': '3',
                            'links': {'self': '/users/3'}
                        },
                        {
                            'type': 'users',
                            'id': '4',
                            'links': {'self': '/users/4'}
                        }
                    ],
                    'links': {
                        'self': '/users/2/all_friends',
                    }
                }
            )
        )
    )
    def test_to_many_relationship(
        self,
        query_builder,
        session,
        user_cls,
        id,
        links,
        result
    ):
        """Each to-many member gets a self link; the document gets *links*."""
        query = query_builder.select_related(
            session.query(user_cls).get(id),
            'all_friends',
            fields={'users': []},
            links=links
        )
        assert session.execute(query).scalar() == result

    @pytest.mark.parametrize(
        ('id', 'links', 'result'),
        (
            (
                2,
                {'self': '/categories/2/parent'},
                {
                    'data': {
                        'type': 'categories',
                        'id': '1',
                        'links': {'self': '/categories/1'}
                    },
                    'links': {'self': '/categories/2/parent'}
                }
            ),
            (
                5,
                {'self': '/categories/5/parent'},
                {
                    'data': {
                        'type': 'categories',
                        'id': '3',
                        'links': {'self': '/categories/3'}
                    },
                    'links': {'self': '/categories/5/parent'}
                }
            )
        )
    )
    def test_to_one_parent_child_relationship(
        self,
        query_builder,
        session,
        category_cls,
        id,
        result,
        links
    ):
        """Self-referential to-one relation (category -> parent) with links."""
        query = query_builder.select_related(
            session.query(category_cls).get(id),
            'parent',
            fields={'categories': []},
            links=links
        )
        assert session.execute(query).scalar() == result

    @pytest.mark.parametrize(
        ('id', 'links', 'result'),
        (
            (
                1,
                {
                    'self': '/articles/1/category',
                },
                {
                    'data': {
                        'type': 'categories',
                        'id': '1',
                        'links': {
                            'self': '/categories/1',
                        }
                    },
                    'links': {
                        'self': '/articles/1/category',
                    }
                }
            ),
        )
    )
    def test_to_one_relationship(
        self,
        query_builder,
        session,
        article_cls,
        id,
        links,
        result
    ):
        """Plain to-one relation (article -> category) with links."""
        query = query_builder.select_related(
            session.query(article_cls).get(id),
            'category',
            fields={'categories': []},
            links=links
        )
        assert session.execute(query).scalar() == result

    @pytest.mark.parametrize(
        ('id', 'result'),
        (
            (
                1,
                {
                    'data': {
                        'type': 'categories',
                        'id': '1',
                        'links': {
                            'self': '/categories/1',
                        }
                    }
                }
            ),
        )
    )
    def test_as_text_parameter(
        self,
        query_builder,
        session,
        article_cls,
        id,
        result
    ):
        """as_text=True makes the query return a JSON string, not JSONB."""
        query = query_builder.select_related(
            session.query(article_cls).get(id),
            'category',
            fields={'categories': []},
            as_text=True
        )
        assert json.loads(session.execute(query).scalar()) == result

    @pytest.mark.parametrize(
        ('id', 'result'),
        (
            (
                1,
                {'data': None}
            ),
        )
    )
    def test_empty_result(
        self,
        query_builder,
        session,
        category_cls,
        id,
        result
    ):
        """A missing to-one target serializes as ``{'data': None}``."""
        query = query_builder.select_related(
            session.query(category_cls).get(id),
            'parent',
            fields={'categories': []}
        )
        assert session.execute(query).scalar() == result
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_model.object_detection.metrics.coco_tools."""
import json
import os
import re
import numpy as np
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.metrics import coco_tools
class CocoToolsTest(tf.test.TestCase):
def setUp(self):
    """Build the shared COCO-format groundtruth and detection fixtures.

    Two images ('first', 'second'), three categories, one groundtruth
    box per image; the detections mirror the groundtruth boxes with
    scores .8 and .7, so evaluation should be perfect.
    """
    groundtruth_annotations_list = [
        {
            'id': 1,
            'image_id': 'first',
            'category_id': 1,
            'bbox': [100., 100., 100., 100.],
            'area': 100.**2,
            'iscrowd': 0
        },
        {
            'id': 2,
            'image_id': 'second',
            'category_id': 1,
            'bbox': [50., 50., 50., 50.],
            'area': 50.**2,
            'iscrowd': 0
        },
    ]
    image_list = [{'id': 'first'}, {'id': 'second'}]
    category_list = [{'id': 0, 'name': 'person'},
                     {'id': 1, 'name': 'cat'},
                     {'id': 2, 'name': 'dog'}]
    self._groundtruth_dict = {
        'annotations': groundtruth_annotations_list,
        'images': image_list,
        'categories': category_list
    }
    self._detections_list = [
        {
            'image_id': 'first',
            'category_id': 1,
            'bbox': [100., 100., 100., 100.],
            'score': .8
        },
        {
            'image_id': 'second',
            'category_id': 1,
            'bbox': [50., 50., 50., 50.],
            'score': .7
        },
    ]
def testCocoWrappers(self):
    """Perfect detections against matching groundtruth score mAP == 1.0."""
    groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict)
    detections = groundtruth.LoadAnnotations(self._detections_list)
    evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
    summary_metrics, _ = evaluator.ComputeMetrics()
    self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
def testExportGroundtruthToCOCO(self):
image_ids = ['first', 'second']
groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json')
result = coco_tools.ExportGroundtruthToCOCO(
image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=output_path)
self.assertDictEqual(result, self._groundtruth_dict)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportDetectionsToCOCO(self):
image_ids = ['first', 'second']
detections_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
detections_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detections_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json')
result = coco_tools.ExportDetectionsToCOCO(
image_ids,
detections_boxes,
detections_scores,
detections_classes,
categories,
output_path=output_path)
self.assertListEqual(result, self._detections_list)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportSegmentsToCOCO(self):
image_ids = ['first', 'second']
detection_masks = [np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8), np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8)]
for i, detection_mask in enumerate(detection_masks):
detection_masks[i] = detection_mask[:, :, :, None]
detection_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detection_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json')
result = coco_tools.ExportSegmentsToCOCO(
image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
mask_load = mask.decode([written_result[0]['segmentation']])
self.assertTrue(np.allclose(mask_load, detection_masks[0]))
self.assertAlmostEqual(result, written_result)
def testExportKeypointsToCOCO(self):
image_ids = ['first', 'second']
detection_keypoints = [
np.array(
[[[100, 200], [300, 400], [500, 600]],
[[50, 150], [250, 350], [450, 550]]], dtype=np.int32),
np.array(
[[[110, 210], [310, 410], [510, 610]],
[[60, 160], [260, 360], [460, 560]]], dtype=np.int32)]
detection_scores = [np.array([.8, 0.2], np.float),
np.array([.7, 0.3], np.float)]
detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)]
categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3},
{'id': 2, 'name': 'cat'},
{'id': 3, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json')
result = coco_tools.ExportKeypointsToCOCO(
image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testSingleImageDetectionBoxesExport(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_classes=classes,
detection_scores=scores)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertAlmostEqual(annotation['score'], scores[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
def testSingleImageDetectionMaskExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_classes=classes,
detection_scores=scores,
detection_masks=masks)
expected_counts = ['04', '31', '4']
for i, mask_annotation in enumerate(coco_annotations):
self.assertEqual(mask_annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
mask_annotation['segmentation']), masks[i])))
self.assertEqual(mask_annotation['image_id'], 'first_image')
self.assertEqual(mask_annotation['category_id'], classes[i])
self.assertAlmostEqual(mask_annotation['score'], scores[i])
def testSingleImageGroundtruthExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
is_crowd = np.array([0, 1, 0], dtype=np.int32)
next_annotation_id = 1
expected_counts = ['04', '31', '4']
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
# Tests exporting with is_crowd.
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks,
groundtruth_is_crowd=is_crowd)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['iscrowd'], is_crowd[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
def testSingleImageGroundtruthExportWithKeypoints(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]],
[[0, 0], [0.125, 0.125], [0.375, 0.375]],
[[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]],
dtype=np.float32)
visibilities = np.array([[2, 2, 2],
[2, 2, 0],
[2, 0, 0]], dtype=np.int32)
areas = np.array([15., 16., 17.])
classes = np.array([1, 2, 3], dtype=np.int32)
is_crowd = np.array([0, 1, 0], dtype=np.int32)
next_annotation_id = 1
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_keypoints=keypoints,
groundtruth_keypoint_visibilities=visibilities,
groundtruth_area=areas)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
self.assertEqual(annotation['num_keypoints'], 3 - i)
self.assertEqual(annotation['area'], 15.0 + i)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
# Tests exporting with is_crowd.
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_keypoints=keypoints,
groundtruth_keypoint_visibilities=visibilities,
groundtruth_is_crowd=is_crowd)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['iscrowd'], is_crowd[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
self.assertEqual(annotation['num_keypoints'], 3 - i)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
# Testing the area values are derived from the bounding boxes.
if i == 0:
self.assertAlmostEqual(annotation['area'], 1.0)
else:
self.assertAlmostEqual(annotation['area'], 0.25)
def testSingleImageDetectionBoxesExportWithKeypoints(self):
boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]],
dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]],
dtype=np.float32)
keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]],
[[0, 0], [0.125, 0.125], [0.375, 0.375]],
[[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]],
dtype=np.float32)
visibilities = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=np.int32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_scores=scores,
detection_classes=classes,
detection_keypoints=keypoints,
detection_keypoint_visibilities=visibilities)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['score'], scores[i])
self.assertEqual(annotation['num_keypoints'], 3)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
if __name__ == '__main__':
  # Discover and run the test methods defined in this module.
  tf.test.main()
# ============================================================================
import base64
import cv2
import numpy as np
import simplejson as json
import stasm
import sys
import os
from flask import (Flask, flash, jsonify, redirect,
render_template, request, session)
from passlib.hash import sha256_crypt
from sqlalchemy import or_
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
import db
from models import User
from utils import CONFIG, send_mail
from faceInfo import FaceInfo
# Module-level WSGI application object and its configuration.
application = Flask(__name__)
# TODO do this with api app.register_blueprint(api)
application.secret_key = CONFIG.get('app', 'secret_key')
# NOTE(review): debug=True enables the interactive debugger and code
# execution from the browser on errors; it must not ship to production.
application.debug = True
@application.route("/")
@application.route("/dashboard")
def index():
    """Render the dashboard for authenticated sessions, the landing page
    otherwise."""
    template = 'dashboard.html' if session.get('loggedIn') else 'index.html'
    return render_template(template)
@application.route('/login', methods=['POST'])
def login():
    """ Confirm that a username and password match a User record in the db.

    Reads ``username`` and ``password`` from the request args and returns a
    JSON payload with ``success`` and ``message``. When the user has stored
    face analysis data, login is withheld (message 'takePhoto') until the
    face comparison step completes.
    """
    username = request.args.get('username')
    password = request.args.get('password')
    success = False
    message = ''
    if username and password:
        user_match = db.session.query(User)\
            .filter(User.username == username).first()
        if user_match and sha256_crypt.verify(password, user_match.password):
            if user_match.active:
                # NOTE(review): the trailing commas store 1-tuples in the
                # session; compareFace/confirmPhoto rely on this via
                # session['userId'][0], so do not "fix" it in isolation.
                session['username'] = user_match.username,
                session['userId'] = user_match.id,
                session['loggedIn'] = True
                success = True
            else:
                message = 'Please confirm your registration before logging in'
            # Face-check gate: runs even for the active branch above and
            # downgrades the login until the photo comparison succeeds.
            if user_match.face_analysis is not None:
                session['loggedIn'] = False
                success = False
                message = 'takePhoto'
        else:
            message = 'Login credentials invalid'
    else:
        message = 'You must provide a username and password'
    return jsonify(success=success, message=message)
@application.route('/register', methods=['POST'])
def register():
    """Register a new user account and send an email verification link.

    Expects ``username``, ``email`` and ``password`` request args. Rejects
    the request when any are missing or when the username or email is
    already taken. On success the user is created inactive and a
    verification email is sent.
    """
    username = request.args.get('username')
    email = request.args.get('email')
    password = request.args.get('password')
    if not all([username, email, password]):
        msg = 'You must provide a username, email, and password to register.'
        return jsonify(success=False, message=msg)
    existing_accounts = db.session.query(User)\
        .filter(or_(User.username == username,
                    User.email == email)).all()
    if existing_accounts:
        usernames = [u.username for u in existing_accounts]
        msg = 'There is already an account with this '
        if username in usernames:
            msg += 'username'
        else:
            msg += 'email address'
        return jsonify(success=False, message=msg)
    # passlib renamed encrypt() to hash() in 1.7; hash() is the supported
    # API and produces identical sha256_crypt digests.
    new_user = User(username=username, email=email, active=False,
                    password=sha256_crypt.hash(password))
    new_user.insert()
    site_url = CONFIG.get('app', 'url')
    verify_link = 'https://{0}/verify?id={1}'.format(site_url, new_user.id)
    subject = "Welcome to {0}!".format(CONFIG.get('app', 'name'))
    email_msg = '\n'.join([
        'Welcome! Your account has been created!',
        'Please click the link below to verify your email address.',
        verify_link, '', '',
        'Thank you for joining. We hope you enjoy your account.'
    ])
    send_mail(new_user.email, subject, email_msg)
    return jsonify(success=True,
                   message='Please check your email to verify your account.')
@application.route('/checkUsername', methods=['POST'])
def checkUsername():
    """Front-end validation helper: report whether a username is taken."""
    candidate = request.args.get('username')
    matches = db.session.query(User)\
        .filter(User.username == candidate).all()
    if not matches:
        return jsonify(success=True)
    return jsonify(success=False, message='Username already in use.')
@application.route('/checkEmail', methods=['POST'])
def checkEmail():
    """Front-end validation helper: report whether an email is registered."""
    candidate = request.args.get('email')
    matches = db.session.query(User)\
        .filter(User.email == candidate).all()
    if not matches:
        return jsonify(success=True)
    msg = ('Email already in use. ' +
           'Please sign in or recover your account information')
    return jsonify(success=False, message=msg)
@application.route('/verify')
def verify():
    """ Activates a user after they click the email link.

    Reads the user id from the ``id`` query arg, flips the account to
    active, logs the user in and renders the dashboard.
    """
    user_id = request.args.get('id')
    if not user_id:
        # NOTE(review): raising UserWarning surfaces as a 500 to the
        # browser; a redirect or flash message would be friendlier.
        raise UserWarning("User ID missing")
    user = db.session.query(User).filter(User.id == user_id).first()
    if not user:
        raise UserWarning("No user found matching ID")
    # Commit happens in the request-teardown handler (close_db_session).
    user.active = True
    # NOTE(review): trailing commas store 1-tuples, matching login();
    # compareFace/confirmPhoto depend on session['userId'][0].
    session['username'] = user.username,
    session['userId'] = user.id,
    session['loggedIn'] = True
    flash('Your account is now verified!', 'info')
    return render_template('dashboard.html')
@application.route('/logout')
def logout():
    """Clear the login session keys and redirect to the public site."""
    session.pop('username', None)
    session.pop('userId', None)
    session.pop('loggedIn', None)
    # Fixed typo in the user-facing message ('sucessfully' -> 'successfully').
    flash('You have successfully logged out.', 'info')
    return redirect('https://' + CONFIG.get('app', 'url'))
@application.route('/analyzePhoto', methods=['POST'])
def analyzePhoto():
    """Locate facial landmarks in an uploaded photo.

    Reads the 'webcam' file upload, runs STASM landmark search on the
    grayscale image, and returns the derived face measurements plus a
    base64-encoded preview image with the landmarks painted white.
    """
    photo = request.files['webcam']
    # np.fromstring is deprecated; frombuffer reads the bytes directly.
    np_arr = np.frombuffer(photo.read(), np.uint8)
    gray_img = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)
    landmarks = stasm.search_single(gray_img)
    if len(landmarks) == 0:
        return jsonify(success=False, message="Face not found. Please try again.")
    face = FaceInfo()
    face.generateInfoFromStasm(landmarks)
    landmarks = stasm.force_points_into_image(landmarks, gray_img)
    for point in landmarks:
        # Landmark coordinates are floats; array indices must be ints.
        gray_img[int(round(point[1]))][int(round(point[0]))] = 255
    comparison_photo = cv2.imencode('.jpeg', gray_img)[1]
    # base64.encodestring was removed in Python 3.9; encodebytes returns
    # bytes, which must be decoded before joining into the data URI string.
    b64_comparison_photo = base64.encodebytes(
        comparison_photo.tobytes()).decode('ascii')
    return jsonify(data=face.getInfo(),
                   img='data:image/jpeg;base64,' + b64_comparison_photo,
                   success=True)
@application.route('/compareFace', methods=['POST'])
def compareFace():
    """ Compares given face json to session user.

    Posts JSON face measurements, compares them against the stored
    face_analysis of the logged-in user, and sets session['loggedIn']
    to the comparison result.
    """
    face_data = request.get_json(force=True)
    # session['userId'] is a 1-tuple (see login/verify), hence the [0].
    user_id = session['userId'][0]
    user = db.session.query(User).filter(User.id == user_id).first()
    known_face = FaceInfo()
    known_face.generateInfoFromJson(user.face_analysis)
    match_face = FaceInfo()
    # NOTE(review): writes FaceInfo's private _info directly; a setter or
    # constructor argument on FaceInfo would be cleaner.
    match_face._info = face_data
    match = known_face.isSamePerson(match_face)
    session['loggedIn'] = match
    return jsonify(success=match)
@application.route('/confirmPhoto', methods=['POST'])
def confirmPhoto():
    """Persist the user-confirmed facial analysis on their User record."""
    payload = request.get_json(force=True)
    current_id = session['userId'][0]
    account = db.session.query(User).filter(User.id == current_id).first()
    account.face_analysis = json.dumps(payload)
    return jsonify(success=True, data=account.face_analysis)
@application.context_processor
def inject_globals():
    """Expose app-wide template variables (currently just the app name)."""
    return {'app_name': CONFIG.get('app', 'name')}
@application.teardown_appcontext
def close_db_session(error):
    """ Make sure the database connection closes after each request"""
    # Commit whatever the request changed (safe_commit is expected to
    # handle failures), then release the connection back to the pool.
    db.safe_commit()
    db.session.close()
if __name__ == "__main__":
    # Development entry point: run Flask's built-in server directly.
    application.run()
# ============================================================================
"""Hypergeometric and Meijer G-functions"""
from __future__ import print_function, division
from sympy.core import S, I, pi, oo, zoo, ilcm, Mod
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.core.containers import Tuple
from sympy.core.compatibility import reduce, range
from sympy.core.mul import Mul
from sympy.core.symbol import Dummy
from sympy.functions import (sqrt, exp, log, sin, cos, asin, atan,
sinh, cosh, asinh, acosh, atanh, acoth)
class TupleArg(Tuple):
    """Tuple of hypergeometric parameters supporting termwise limits."""

    def limit(self, x, xlim, dir='+'):
        """Return the elementwise limit x -> xlim of this tuple."""
        from sympy.series.limits import limit
        return TupleArg(*(limit(component, x, xlim, dir)
                          for component in self.args))
# TODO should __new__ accept **options?
# TODO should constructors should check if parameters are sensible?
def _prep_tuple(v):
    """
    Turn an iterable argument V into a Tuple and unpolarify, since both
    hypergeometric and meijer g-functions are unbranched in their parameters.

    Examples
    ========

    >>> from sympy.functions.special.hyper import _prep_tuple
    >>> _prep_tuple([1, 2, 3])
    (1, 2, 3)
    >>> _prep_tuple((4, 5))
    (4, 5)
    >>> _prep_tuple((7, 8, 9))
    (7, 8, 9)
    """
    from sympy import unpolarify
    unpolarified = (unpolarify(p) for p in v)
    return TupleArg(*unpolarified)
class TupleParametersBase(Function):
    """ Base class that takes care of differentiation, when some of
    the arguments are actually tuples. """
    # This is not deduced automatically since there are Tuples as arguments.
    is_commutative = True

    def _eval_derivative(self, s):
        # Chain rule over all parameters and the argument: fdiff((1, i))
        # is interpreted by subclasses as the derivative wrt the i-th
        # entry of the parameter tuples (cf. meijerg.fdiff, which forwards
        # argindex[1] to _diff_wrt_parameter), and fdiff(3) is the
        # derivative wrt the argument z.
        try:
            res = 0
            if self.args[0].has(s) or self.args[1].has(s):
                for i, p in enumerate(self._diffargs):
                    m = self._diffargs[i].diff(s)
                    if m != 0:
                        res += self.fdiff((1, i))*m
            return res + self.fdiff(3)*self.args[2].diff(s)
        except (ArgumentIndexError, NotImplementedError):
            # A parameter derivative is not implemented for this case;
            # return an unevaluated Derivative instead.
            return Derivative(self, s)
class hyper(TupleParametersBase):
    r"""
    The (generalized) hypergeometric function is defined by a series where
    the ratios of successive terms are a rational function of the summation
    index. When convergent, it is continued analytically to the largest
    possible domain.

    The hypergeometric function depends on two vectors of parameters, called
    the numerator parameters :math:`a_p`, and the denominator parameters
    :math:`b_q`. It also has an argument :math:`z`. The series definition is

    .. math ::
        {}_pF_q\left(\begin{matrix} a_1, \dots, a_p \\ b_1, \dots, b_q \end{matrix}
                     \middle| z \right)
        = \sum_{n=0}^\infty \frac{(a_1)_n \dots (a_p)_n}{(b_1)_n \dots (b_q)_n}
                            \frac{z^n}{n!},

    where :math:`(a)_n = (a)(a+1)\dots(a+n-1)` denotes the rising factorial.

    If one of the :math:`b_q` is a non-positive integer then the series is
    undefined unless one of the `a_p` is a larger (i.e. smaller in
    magnitude) non-positive integer. If none of the :math:`b_q` is a
    non-positive integer and one of the :math:`a_p` is a non-positive
    integer, then the series reduces to a polynomial. To simplify the
    following discussion, we assume that none of the :math:`a_p` or
    :math:`b_q` is a non-positive integer. For more details, see the
    references.

    The series converges for all :math:`z` if :math:`p \le q`, and thus
    defines an entire single-valued function in this case. If :math:`p =
    q+1` the series converges for :math:`|z| < 1`, and can be continued
    analytically into a half-plane. If :math:`p > q+1` the series is
    divergent for all :math:`z`.

    Note: The hypergeometric function constructor currently does *not* check
    if the parameters actually yield a well-defined function.

    Examples
    ========

    The parameters :math:`a_p` and :math:`b_q` can be passed as arbitrary
    iterables, for example:

    >>> from sympy.functions import hyper
    >>> from sympy.abc import x, n, a
    >>> hyper((1, 2, 3), [3, 4], x)
    hyper((1, 2, 3), (3, 4), x)

    There is also pretty printing (it looks better using unicode):

    >>> from sympy import pprint
    >>> pprint(hyper((1, 2, 3), [3, 4], x), use_unicode=False)
      _
     |_  /1, 2, 3 |  \
     |   |        | x|
    3  2 \  3, 4  |  /

    The parameters must always be iterables, even if they are vectors of
    length one or zero:

    >>> hyper((1, ), [], x)
    hyper((1,), (), x)

    But of course they may be variables (but if they depend on x then you
    should not expect much implemented functionality):

    >>> hyper((n, a), (n**2,), x)
    hyper((n, a), (n**2,), x)

    The hypergeometric function generalizes many named special functions.
    The function hyperexpand() tries to express a hypergeometric function
    using named special functions.
    For example:

    >>> from sympy import hyperexpand
    >>> hyperexpand(hyper([], [], x))
    exp(x)

    You can also use expand_func:

    >>> from sympy import expand_func
    >>> expand_func(x*hyper([1, 1], [2], -x))
    log(x + 1)

    More examples:

    >>> from sympy import S
    >>> hyperexpand(hyper([], [S(1)/2], -x**2/4))
    cos(x)
    >>> hyperexpand(x*hyper([S(1)/2, S(1)/2], [S(3)/2], x**2))
    asin(x)

    We can also sometimes hyperexpand parametric functions:

    >>> from sympy.abc import a
    >>> hyperexpand(hyper([-a], [], x))
    (-x + 1)**a

    See Also
    ========

    sympy.simplify.hyperexpand
    sympy.functions.special.gamma_functions.gamma
    meijerg

    References
    ==========

    .. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
           Volume 1
    .. [2] http://en.wikipedia.org/wiki/Generalized_hypergeometric_function
    """

    def __new__(cls, ap, bq, z):
        # TODO should we check convergence conditions?
        return Function.__new__(cls, _prep_tuple(ap), _prep_tuple(bq), z)

    @classmethod
    def eval(cls, ap, bq, z):
        # For p <= q the series defines an entire function, so the function
        # is unbranched in z and the argument can be unpolarified.
        from sympy import unpolarify
        if len(ap) <= len(bq):
            nz = unpolarify(z)
            if z != nz:
                return hyper(ap, bq, nz)

    def fdiff(self, argindex=3):
        # Only the derivative wrt the argument z (index 3) is supported:
        # d/dz pFq(ap; bq; z) = (prod(ap)/prod(bq)) * pFq(ap+1; bq+1; z).
        if argindex != 3:
            raise ArgumentIndexError(self, argindex)
        nap = Tuple(*[a + 1 for a in self.ap])
        nbq = Tuple(*[b + 1 for b in self.bq])
        fac = Mul(*self.ap)/Mul(*self.bq)
        return fac*hyper(nap, nbq, self.argument)

    def _eval_expand_func(self, **hints):
        from sympy import gamma, hyperexpand
        # Special-case Gauss' summation theorem for 2F1(a, b; c; 1).
        if len(self.ap) == 2 and len(self.bq) == 1 and self.argument == 1:
            a, b = self.ap
            c = self.bq[0]
            return gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
        return hyperexpand(self)

    def _eval_rewrite_as_Sum(self, ap, bq, z):
        from sympy.functions import factorial, RisingFactorial, Piecewise
        from sympy import Sum
        n = Dummy("n", integer=True)
        rfap = Tuple(*[RisingFactorial(a, n) for a in ap])
        rfbq = Tuple(*[RisingFactorial(b, n) for b in bq])
        coeff = Mul(*rfap) / Mul(*rfbq)
        # Rewrite as the defining series only where it actually converges.
        return Piecewise((Sum(coeff * z**n / factorial(n), (n, 0, oo)),
                          self.convergence_statement), (self, True))

    @property
    def argument(self):
        """ Argument of the hypergeometric function. """
        return self.args[2]

    @property
    def ap(self):
        """ Numerator parameters of the hypergeometric function. """
        return Tuple(*self.args[0])

    @property
    def bq(self):
        """ Denominator parameters of the hypergeometric function. """
        return Tuple(*self.args[1])

    @property
    def _diffargs(self):
        # Flat view of all parameters, used by TupleParametersBase.
        return self.ap + self.bq

    @property
    def eta(self):
        """ A quantity related to the convergence of the series. """
        return sum(self.ap) - sum(self.bq)

    @property
    def radius_of_convergence(self):
        """
        Compute the radius of convergence of the defining series.

        Note that even if this is not oo, the function may still be evaluated
        outside of the radius of convergence by analytic continuation. But if
        this is zero, then the function is not actually defined anywhere else.

        >>> from sympy.functions import hyper
        >>> from sympy.abc import z
        >>> hyper((1, 2), [3], z).radius_of_convergence
        1
        >>> hyper((1, 2, 3), [4], z).radius_of_convergence
        0
        >>> hyper((1, 2), (3, 4), z).radius_of_convergence
        oo
        """
        # NOTE(review): the guard below uses .is_integer (assumption-based,
        # True for symbols declared integer) while the filters use
        # .is_Integer (explicit Integer instances); a symbolic non-positive
        # integer parameter enters this branch but lands in neither list —
        # confirm this is intended.
        if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):
            aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]
            bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]
            if len(aints) < len(bints):
                return S(0)
            popped = False
            for b in bints:
                cancelled = False
                while aints:
                    a = aints.pop()
                    if a >= b:
                        cancelled = True
                        break
                    popped = True
                if not cancelled:
                    return S(0)
            if aints or popped:
                # There are still non-positive numerator parameters.
                # This is a polynomial.
                return oo
        if len(self.ap) == len(self.bq) + 1:
            return S(1)
        elif len(self.ap) <= len(self.bq):
            return oo
        else:
            return S(0)

    @property
    def convergence_statement(self):
        """ Return a condition on z under which the series converges. """
        from sympy import And, Or, re, Ne, oo
        R = self.radius_of_convergence
        if R == 0:
            return False
        if R == oo:
            return True
        # The special functions and their approximations, page 44
        e = self.eta
        z = self.argument
        c1 = And(re(e) < 0, abs(z) <= 1)
        c2 = And(0 <= re(e), re(e) < 1, abs(z) <= 1, Ne(z, 1))
        c3 = And(re(e) >= 1, abs(z) < 1)
        return Or(c1, c2, c3)

    def _eval_simplify(self, ratio, measure):
        from sympy.simplify.hyperexpand import hyperexpand
        return hyperexpand(self)

    def _sage_(self):
        # Conversion hook used by Sage's sympy interface.
        import sage.all as sage
        ap = [arg._sage_() for arg in self.args[0]]
        bq = [arg._sage_() for arg in self.args[1]]
        return sage.hypergeometric(ap, bq, self.argument._sage_())
class meijerg(TupleParametersBase):
r"""
The Meijer G-function is defined by a Mellin-Barnes type integral that
resembles an inverse Mellin transform. It generalizes the hypergeometric
functions.
The Meijer G-function depends on four sets of parameters. There are
"*numerator parameters*"
:math:`a_1, \dots, a_n` and :math:`a_{n+1}, \dots, a_p`, and there are
"*denominator parameters*"
:math:`b_1, \dots, b_m` and :math:`b_{m+1}, \dots, b_q`.
Confusingly, it is traditionally denoted as follows (note the position
of `m`, `n`, `p`, `q`, and how they relate to the lengths of the four
parameter vectors):
.. math ::
G_{p,q}^{m,n} \left(\begin{matrix}a_1, \dots, a_n & a_{n+1}, \dots, a_p \\
b_1, \dots, b_m & b_{m+1}, \dots, b_q
\end{matrix} \middle| z \right).
However, in sympy the four parameter vectors are always available
separately (see examples), so that there is no need to keep track of the
decorating sub- and super-scripts on the G symbol.
The G function is defined as the following integral:
.. math ::
\frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j - s)
\prod_{j=1}^n \Gamma(1 - a_j + s)}{\prod_{j=m+1}^q \Gamma(1- b_j +s)
\prod_{j=n+1}^p \Gamma(a_j - s)} z^s \mathrm{d}s,
where :math:`\Gamma(z)` is the gamma function. There are three possible
contours which we will not describe in detail here (see the references).
If the integral converges along more than one of them the definitions
agree. The contours all separate the poles of :math:`\Gamma(1-a_j+s)`
from the poles of :math:`\Gamma(b_k-s)`, so in particular the G function
is undefined if :math:`a_j - b_k \in \mathbb{Z}_{>0}` for some
:math:`j \le n` and :math:`k \le m`.
The conditions under which one of the contours yields a convergent integral
are complicated and we do not state them here, see the references.
Note: Currently the Meijer G-function constructor does *not* check any
convergence conditions.
Examples
========
You can pass the parameters either as four separate vectors:
>>> from sympy.functions import meijerg
>>> from sympy.abc import x, a
>>> from sympy.core.containers import Tuple
>>> from sympy import pprint
>>> pprint(meijerg((1, 2), (a, 4), (5,), [], x), use_unicode=False)
__1, 2 /1, 2 a, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
or as two nested vectors:
>>> pprint(meijerg([(1, 2), (3, 4)], ([5], Tuple()), x), use_unicode=False)
__1, 2 /1, 2 3, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
As with the hypergeometric function, the parameters may be passed as
arbitrary iterables. Vectors of length zero and one also have to be
passed as iterables. The parameters need not be constants, but if they
depend on the argument then not much implemented functionality should be
expected.
All the subvectors of parameters are available:
>>> from sympy import pprint
>>> g = meijerg([1], [2], [3], [4], x)
>>> pprint(g, use_unicode=False)
__1, 1 /1 2 | \
/__ | | x|
\_|2, 2 \3 4 | /
>>> g.an
(1,)
>>> g.ap
(1, 2)
>>> g.aother
(2,)
>>> g.bm
(3,)
>>> g.bq
(3, 4)
>>> g.bother
(4,)
The Meijer G-function generalizes the hypergeometric functions.
In some cases it can be expressed in terms of hypergeometric functions,
using Slater's theorem. For example:
>>> from sympy import hyperexpand
>>> from sympy.abc import a, b, c
>>> hyperexpand(meijerg([a], [], [c], [b], x), allow_hyper=True)
x**c*gamma(-a + c + 1)*hyper((-a + c + 1,),
(-b + c + 1,), -x)/gamma(-b + c + 1)
Thus the Meijer G-function also subsumes many named functions as special
cases. You can use expand_func or hyperexpand to (try to) rewrite a
Meijer G-function in terms of named special functions. For example:
>>> from sympy import expand_func, S
>>> expand_func(meijerg([[],[]], [[0],[]], -x))
exp(x)
>>> hyperexpand(meijerg([[],[]], [[S(1)/2],[0]], (x/2)**2))
sin(x)/sqrt(pi)
See Also
========
hyper
sympy.simplify.hyperexpand
References
==========
.. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
.. [2] http://en.wikipedia.org/wiki/Meijer_G-function
"""
def __new__(cls, *args):
if len(args) == 5:
args = [(args[0], args[1]), (args[2], args[3]), args[4]]
if len(args) != 3:
raise TypeError("args must be either as, as', bs, bs', z or "
"as, bs, z")
def tr(p):
if len(p) != 2:
raise TypeError("wrong argument")
return TupleArg(_prep_tuple(p[0]), _prep_tuple(p[1]))
arg0, arg1 = tr(args[0]), tr(args[1])
if Tuple(arg0, arg1).has(oo, zoo, -oo):
raise ValueError("G-function parameters must be finite")
if any((a - b).is_Integer and a - b > 0
for a in arg0[0] for b in arg1[0]):
raise ValueError("no parameter a1, ..., an may differ from "
"any b1, ..., bm by a positive integer")
# TODO should we check convergence conditions?
return Function.__new__(cls, arg0, arg1, args[2])
    def fdiff(self, argindex=3):
        """
        Derivative of the G-function.

        argindex=3 differentiates with respect to the argument z, using a
        contiguous relation; any other index is delegated to
        _diff_wrt_parameter.
        """
        if argindex != 3:
            # NOTE(review): argindex is subscripted here, so parameter
            # derivatives are apparently requested with a tuple-like index;
            # confirm against the Derivative machinery that calls fdiff.
            return self._diff_wrt_parameter(argindex[1])
        if len(self.an) >= 1:
            # Shift a1 down by one and apply the contiguous relation.
            a = list(self.an)
            a[0] -= 1
            G = meijerg(a, self.aother, self.bm, self.bother, self.argument)
            return 1/self.argument * ((self.an[0] - 1)*self + G)
        elif len(self.bm) >= 1:
            # Shift b1 up by one and apply the contiguous relation.
            b = list(self.bm)
            b[0] += 1
            G = meijerg(self.an, self.aother, b, self.bother, self.argument)
            return 1/self.argument * (self.bm[0]*self - G)
        else:
            # No front parameters available to shift; fall back to zero.
            return S.Zero
    def _diff_wrt_parameter(self, idx):
        # Differentiation wrt a parameter can only be done in very special
        # cases. In particular, if we want to differentiate with respect to
        # `a`, all other gamma factors have to reduce to rational functions.
        #
        # Let MT denote mellin transform. Suppose T(-s) is the gamma factor
        # appearing in the definition of G. Then
        #
        #   MT(log(z)G(z)) = d/ds T(s) = d/da T(s) + ...
        #
        # Thus d/da G(z) = log(z)G(z) - ...
        # The ... can be evaluated as a G function under the above conditions,
        # the formula being most easily derived by using
        #
        #    d  Gamma(s + n)    Gamma(s + n) / 1    1                1     \
        #    -- ------------ =  ------------ | - + ----  + ... + --------- |
        #    ds  Gamma(s)        Gamma(s)    \ s  s + 1          s + n - 1 /
        #
        # which follows from the difference equation of the digamma function.
        # (There is a similar equation for -n instead of +n).

        # We first figure out how to pair the parameters.
        # idx indexes the concatenation an + aother + bm + bother; the
        # parameter we differentiate wrt is removed from its list before
        # pairing the remaining ones.
        an = list(self.an)
        ap = list(self.aother)
        bm = list(self.bm)
        bq = list(self.bother)
        if idx < len(an):
            an.pop(idx)
        else:
            idx -= len(an)
            if idx < len(ap):
                ap.pop(idx)
            else:
                idx -= len(ap)
                if idx < len(bm):
                    bm.pop(idx)
                else:
                    bq.pop(idx - len(bm))
        # Pair each remaining a with a b differing from it by an integer;
        # only then do the gamma factors reduce to rational functions.
        pairs1 = []
        pairs2 = []
        for l1, l2, pairs in [(an, bq, pairs1), (ap, bm, pairs2)]:
            while l1:
                x = l1.pop()
                found = None
                for i, y in enumerate(l2):
                    if not Mod((x - y).simplify(), 1):
                        found = i
                        break
                if found is None:
                    raise NotImplementedError('Derivative not expressible '
                                              'as G-function?')
                y = l2[i]
                l2.pop(i)
                pairs.append((x, y))
        # Now build the result.
        res = log(self.argument)*self
        # Each pair contributes |a - b| correction G-functions, one per term
        # of the digamma difference formula quoted above.
        for a, b in pairs1:
            sign = 1
            n = a - b
            base = b
            if n < 0:
                sign = -1
                n = b - a
                base = a
            for k in range(n):
                res -= sign*meijerg(self.an + (base + k + 1,), self.aother,
                                    self.bm, self.bother + (base + k + 0,),
                                    self.argument)
        for a, b in pairs2:
            sign = 1
            n = b - a
            base = a
            if n < 0:
                sign = -1
                n = a - b
                base = b
            for k in range(n):
                res -= sign*meijerg(self.an, self.aother + (base + k + 1,),
                                    self.bm + (base + k + 0,), self.bother,
                                    self.argument)
        return res
    def get_period(self):
        """
        Return a number P such that G(x*exp(I*P)) == G(x).

        >>> from sympy.functions.special.hyper import meijerg
        >>> from sympy.abc import z
        >>> from sympy import pi, S

        >>> meijerg([1], [], [], [], z).get_period()
        2*pi
        >>> meijerg([pi], [], [], [], z).get_period()
        oo
        >>> meijerg([1, 2], [], [], [], z).get_period()
        oo
        >>> meijerg([1,1], [2], [1, S(1)/2, S(1)/3], [1], z).get_period()
        12*pi
        """
        # This follows from slater's theorem.
        def compute(l):
            # Period contribution of one parameter vector: oo if any entry
            # is irrational or two entries differ by an integer.
            # first check that no two differ by an integer
            for i, b in enumerate(l):
                if not b.is_Rational:
                    return oo
                for j in range(i + 1, len(l)):
                    if not Mod((b - l[j]).simplify(), 1):
                        return oo
            # Least common multiple of the parameter denominators.
            return reduce(ilcm, (x.q for x in l), 1)
        beta = compute(self.bm)
        alpha = compute(self.an)
        p, q = len(self.ap), len(self.bq)
        if p == q:
            if beta == oo or alpha == oo:
                return oo
            return 2*pi*ilcm(alpha, beta)
        elif p < q:
            return 2*pi*beta
        else:
            return 2*pi*alpha
def _eval_expand_func(self, **hints):
from sympy import hyperexpand
return hyperexpand(self)
    def _eval_evalf(self, prec):
        # The default code is insufficient for polar arguments.
        # mpmath provides an optional argument "r", which evaluates
        # G(z**(1/r)). I am not sure what its intended use is, but we hijack it
        # here in the following way: to evaluate at a number z of |argument|
        # less than (say) n*pi, we put r=1/n, compute z' = root(z, n)
        # (carefully so as not to loose the branch information), and evaluate
        # G(z'**(1/r)) = G(z'**n) = G(z).
        from sympy.functions import exp_polar, ceiling
        from sympy import Expr
        import mpmath
        z = self.argument
        znum = self.argument._eval_evalf(prec)
        # Split off the polar branch factor exp_polar(I*branch), if any.
        if znum.has(exp_polar):
            znum, branch = znum.as_coeff_mul(exp_polar)
            if len(branch) != 1:
                # More than one polar factor: give up, let evalf fall through.
                return
            branch = branch[0].args[0]/I
        else:
            branch = S(0)
        # n is chosen so that |branch/n| < pi; see comment above.
        n = ceiling(abs(branch/S.Pi)) + 1
        znum = znum**(S(1)/n)*exp(I*branch / n)
        # Convert all args to mpf or mpc
        try:
            [z, r, ap, bq] = [arg._to_mpmath(prec)
                              for arg in [znum, 1/n, self.args[0], self.args[1]]]
        except ValueError:
            # Some argument is not numeric; cannot evaluate.
            return
        with mpmath.workprec(prec):
            v = mpmath.meijerg(ap, bq, z, r)
        return Expr._from_mpmath(v, prec)
def integrand(self, s):
""" Get the defining integrand D(s). """
from sympy import gamma
return self.argument**s \
* Mul(*(gamma(b - s) for b in self.bm)) \
* Mul(*(gamma(1 - a + s) for a in self.an)) \
/ Mul(*(gamma(1 - b + s) for b in self.bother)) \
/ Mul(*(gamma(a - s) for a in self.aother))
    @property
    def argument(self):
        """ Argument of the Meijer G-function. """
        return self.args[2]

    @property
    def an(self):
        """ First set of numerator parameters (a_1, ..., a_n). """
        # self.args[0] is the pair (an, aother).
        return Tuple(*self.args[0][0])

    @property
    def ap(self):
        """ Combined numerator parameters (a_1, ..., a_p). """
        return Tuple(*(self.args[0][0] + self.args[0][1]))

    @property
    def aother(self):
        """ Second set of numerator parameters (a_{n+1}, ..., a_p). """
        return Tuple(*self.args[0][1])

    @property
    def bm(self):
        """ First set of denominator parameters (b_1, ..., b_m). """
        # self.args[1] is the pair (bm, bother).
        return Tuple(*self.args[1][0])

    @property
    def bq(self):
        """ Combined denominator parameters (b_1, ..., b_q). """
        return Tuple(*(self.args[1][0] + self.args[1][1]))

    @property
    def bother(self):
        """ Second set of denominator parameters (b_{m+1}, ..., b_q). """
        return Tuple(*self.args[1][1])

    @property
    def _diffargs(self):
        # All parameters, in the order expected by _diff_wrt_parameter.
        return self.ap + self.bq

    @property
    def nu(self):
        """ A quantity related to the convergence region of the integral,
        c.f. references. """
        return sum(self.bq) - sum(self.ap)

    @property
    def delta(self):
        """ A quantity related to the convergence region of the integral,
        c.f. references. """
        return len(self.bm) + len(self.an) - S(len(self.ap) + len(self.bq))/2
class HyperRep(Function):
    """
    A base class for "hyper representation functions".

    This is used exclusively in hyperexpand(), but fits more logically here.

    pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
    define an "analytic continuation" to all polar numbers, which is
    continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
    a "nice" expression for the various cases.

    This base class contains the core logic, concrete derived classes only
    supply the actual functions.
    """
    @classmethod
    def eval(cls, *args):
        # Unpolarify all parameters but keep the (last) argument polar;
        # re-dispatch only if something actually changed.
        from sympy import unpolarify
        newargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
        if args != newargs:
            return cls(*newargs)

    @classmethod
    def _expr_small(cls, x):
        """ An expression for F(x) which holds for |x| < 1. """
        raise NotImplementedError

    @classmethod
    def _expr_small_minus(cls, x):
        """ An expression for F(-x) which holds for |x| < 1. """
        raise NotImplementedError

    @classmethod
    def _expr_big(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
        raise NotImplementedError

    @classmethod
    def _expr_big_minus(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
        raise NotImplementedError

    def _eval_rewrite_as_nonrep(self, *args):
        from sympy import Piecewise
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        minus = False
        newargs = self.args[:-1] + (x,)
        if not n.is_Integer:
            # Half-integer branch factor: we are on the "minus" ray
            # t*exp_polar(I*pi); use the _minus variants below.
            minus = True
            n -= S(1)/2
        newerargs = newargs + (n,)
        if minus:
            small = self._expr_small_minus(*newargs)
            big = self._expr_big_minus(*newerargs)
        else:
            small = self._expr_small(*newargs)
            big = self._expr_big(*newerargs)
        if big == small:
            return small
        # Select the |x| > 1 or |x| < 1 expression at evaluation time.
        return Piecewise((big, abs(x) > 1), (small, True))

    def _eval_rewrite_as_nonrepsmall(self, *args):
        # Like above, but unconditionally use the |x| < 1 expression.
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        args = self.args[:-1] + (x,)
        if not n.is_Integer:
            return self._expr_small_minus(*args)
        return self._expr_small(*args)
class HyperRep_power1(HyperRep):
    """ Return a representative for hyper([-a], [], z) == (1 - z)**a. """

    @classmethod
    def _expr_small(cls, a, x):
        # F(x), |x| < 1
        return (1 - x)**a

    @classmethod
    def _expr_small_minus(cls, a, x):
        # F(-x), |x| < 1
        return (1 + x)**a

    @classmethod
    def _expr_big(cls, a, x, n):
        # F on branch n, |x| > 1; integer a is single-valued.
        if a.is_integer:
            return cls._expr_small(a, x)
        return (x - 1)**a*exp((2*n - 1)*pi*I*a)

    @classmethod
    def _expr_big_minus(cls, a, x, n):
        if a.is_integer:
            return cls._expr_small_minus(a, x)
        return (1 + x)**a*exp(2*n*pi*I*a)
class HyperRep_power2(HyperRep):
    """ Return a representative for hyper([a, a - 1/2], [2*a], z). """

    @classmethod
    def _expr_small(cls, a, x):
        return 2**(2*a - 1)*(1 + sqrt(1 - x))**(1 - 2*a)

    @classmethod
    def _expr_small_minus(cls, a, x):
        return 2**(2*a - 1)*(1 + sqrt(1 + x))**(1 - 2*a)

    @classmethod
    def _expr_big(cls, a, x, n):
        # Fold odd branch numbers into the even case with opposite sign.
        sgn = -1
        if n.is_odd:
            sgn = 1
            n -= 1
        return 2**(2*a - 1)*(1 + sgn*I*sqrt(x - 1))**(1 - 2*a) \
            *exp(-2*n*pi*I*a)

    @classmethod
    def _expr_big_minus(cls, a, x, n):
        sgn = 1
        if n.is_odd:
            sgn = -1
        return sgn*2**(2*a - 1)*(sqrt(1 + x) + sgn)**(1 - 2*a)*exp(-2*pi*I*a*n)
class HyperRep_log1(HyperRep):
    """ Represent -z*hyper([1, 1], [2], z) == log(1 - z). """

    @classmethod
    def _expr_small(cls, x):
        # F(x), |x| < 1
        return log(1 - x)

    @classmethod
    def _expr_small_minus(cls, x):
        # F(-x), |x| < 1
        return log(1 + x)

    @classmethod
    def _expr_big(cls, x, n):
        # Continuation onto branch n for |x| > 1.
        return (2*n - 1)*pi*I + log(x - 1)

    @classmethod
    def _expr_big_minus(cls, x, n):
        return 2*n*pi*I + log(1 + x)
class HyperRep_atanh(HyperRep):
    """ Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """
    # Fix: the last three methods were missing @classmethod, unlike every
    # sibling HyperRep class and the base-class contract; without it a
    # class-level call binds the first positional argument to `cls`.
    @classmethod
    def _expr_small(cls, x):
        return atanh(sqrt(x))/sqrt(x)

    @classmethod
    def _expr_small_minus(cls, x):
        return atan(sqrt(x))/sqrt(x)

    @classmethod
    def _expr_big(cls, x, n):
        if n.is_even:
            return (acoth(sqrt(x)) + I*pi/2)/sqrt(x)
        else:
            return (acoth(sqrt(x)) - I*pi/2)/sqrt(x)

    @classmethod
    def _expr_big_minus(cls, x, n):
        if n.is_even:
            return atan(sqrt(x))/sqrt(x)
        else:
            return (atan(sqrt(x)) - pi)/sqrt(x)
class HyperRep_asin1(HyperRep):
    """ Represent hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z). """

    @classmethod
    def _expr_small(cls, z):
        return asin(sqrt(z))/sqrt(z)

    @classmethod
    def _expr_small_minus(cls, z):
        return asinh(sqrt(z))/sqrt(z)

    @classmethod
    def _expr_big(cls, z, n):
        return S(-1)**n*((S(1)/2 - n)*pi/sqrt(z) + I*acosh(sqrt(z))/sqrt(z))

    @classmethod
    def _expr_big_minus(cls, z, n):
        return S(-1)**n*(asinh(sqrt(z))/sqrt(z) + n*pi*I/sqrt(z))
class HyperRep_asin2(HyperRep):
    """ Represent hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z). """
    # TODO this can be nicer
    # Built as the quotient of the asin1 and power1 representatives,
    # branch by branch.
    @classmethod
    def _expr_small(cls, z):
        return HyperRep_asin1._expr_small(z) \
            /HyperRep_power1._expr_small(S(1)/2, z)

    @classmethod
    def _expr_small_minus(cls, z):
        return HyperRep_asin1._expr_small_minus(z) \
            /HyperRep_power1._expr_small_minus(S(1)/2, z)

    @classmethod
    def _expr_big(cls, z, n):
        return HyperRep_asin1._expr_big(z, n) \
            /HyperRep_power1._expr_big(S(1)/2, z, n)

    @classmethod
    def _expr_big_minus(cls, z, n):
        return HyperRep_asin1._expr_big_minus(z, n) \
            /HyperRep_power1._expr_big_minus(S(1)/2, z, n)
class HyperRep_sqrts1(HyperRep):
    """ Return a representative for hyper([-a, 1/2 - a], [1/2], z). """

    @classmethod
    def _expr_small(cls, a, z):
        return ((1 - sqrt(z))**(2*a) + (1 + sqrt(z))**(2*a))/2

    @classmethod
    def _expr_small_minus(cls, a, z):
        return (1 + z)**a*cos(2*a*atan(sqrt(z)))

    @classmethod
    def _expr_big(cls, a, z, n):
        if n.is_even:
            return ((sqrt(z) + 1)**(2*a)*exp(2*pi*I*n*a) +
                    (sqrt(z) - 1)**(2*a)*exp(2*pi*I*(n - 1)*a))/2
        else:
            # Reduce the odd case to the shifted even one.
            n -= 1
            return ((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) +
                    (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))/2

    @classmethod
    def _expr_big_minus(cls, a, z, n):
        if n.is_even:
            return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)))
        else:
            return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_sqrts2(HyperRep):
    """ Return a representative for
          sqrt(z)/2*[(1-sqrt(z))**2a - (1 + sqrt(z))**2a]
          == -2*z/(2*a+1) d/dz hyper([-a - 1/2, -a], [1/2], z)"""
    # Fix: _expr_big_minus was missing @classmethod, unlike the other
    # methods here and every sibling HyperRep class.
    @classmethod
    def _expr_small(cls, a, z):
        return sqrt(z)*((1 - sqrt(z))**(2*a) - (1 + sqrt(z))**(2*a))/2

    @classmethod
    def _expr_small_minus(cls, a, z):
        return sqrt(z)*(1 + z)**a*sin(2*a*atan(sqrt(z)))

    @classmethod
    def _expr_big(cls, a, z, n):
        if n.is_even:
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n - 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
        else:
            # Reduce the odd case to the shifted even one.
            n -= 1
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))

    @classmethod
    def _expr_big_minus(cls, a, z, n):
        if n.is_even:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z)*sin(2*a*atan(sqrt(z)))
        else:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z) \
                *sin(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_log2(HyperRep):
    """ Represent log(1/2 + sqrt(1 - z)/2) == -z/4*hyper([3/2, 1, 1], [2, 2], z) """
    # Fix: _expr_big_minus was missing @classmethod, unlike the other
    # methods here and every sibling HyperRep class.
    @classmethod
    def _expr_small(cls, z):
        return log(S(1)/2 + sqrt(1 - z)/2)

    @classmethod
    def _expr_small_minus(cls, z):
        return log(S(1)/2 + sqrt(1 + z)/2)

    @classmethod
    def _expr_big(cls, z, n):
        if n.is_even:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) + I*asin(1/sqrt(z))
        else:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) - I*asin(1/sqrt(z))

    @classmethod
    def _expr_big_minus(cls, z, n):
        if n.is_even:
            return pi*I*n + log(S(1)/2 + sqrt(1 + z)/2)
        else:
            return pi*I*n + log(sqrt(1 + z)/2 - S(1)/2)
class HyperRep_cosasin(HyperRep):
    """ Represent hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z))). """
    # Note there are many alternative expressions, e.g. as powers of a sum of
    # square roots.

    @classmethod
    def _expr_small(cls, a, z):
        return cos(2*a*asin(sqrt(z)))

    @classmethod
    def _expr_small_minus(cls, a, z):
        return cosh(2*a*asinh(sqrt(z)))

    @classmethod
    def _expr_big(cls, a, z, n):
        return cosh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1))

    @classmethod
    def _expr_big_minus(cls, a, z, n):
        return cosh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
class HyperRep_sinasin(HyperRep):
    """ Represent 2*a*z*hyper([1 - a, 1 + a], [3/2], z)
        == sqrt(z)/sqrt(1-z)*sin(2*a*asin(sqrt(z))) """

    @classmethod
    def _expr_small(cls, a, z):
        return sqrt(z)/sqrt(1 - z)*sin(2*a*asin(sqrt(z)))

    @classmethod
    def _expr_small_minus(cls, a, z):
        return -sqrt(z)/sqrt(1 + z)*sinh(2*a*asinh(sqrt(z)))

    @classmethod
    def _expr_big(cls, a, z, n):
        return -1/sqrt(1 - 1/z)*sinh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1))

    @classmethod
    def _expr_big_minus(cls, a, z, n):
        return -1/sqrt(1 + 1/z)*sinh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
| |
#!/usr/bin/env python
# ===================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ===================================
from contextlib import contextmanager
import subprocess
import os
import sys
import glob
import codecs
import imp
import time
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# [key] string Name;
# [write,required,ValueMap{"init", "upstart", "systemd"},
# Values{"init","upstart","systemd"}] string Controller;
# [write] boolean Enabled;
# [write,ValueMap{"Running", "Stopped"},Values{"Running",
# "Stopped"}] string State;
# [read] string Path;
global show_mof
show_mof = False
def init_vars(Name, Controller, Enabled, State):
    """
    Normalize the four DSC resource properties: None becomes '' (False for
    Enabled), Controller and State are lower-cased, and Enabled is coerced
    to a strict boolean via equality with True.
    """
    Name = '' if Name is None else Name
    Controller = '' if Controller is None else Controller
    State = '' if State is None else State
    # None == True is False, so this also covers the missing-value case.
    Enabled = (Enabled == True)
    return Name, Controller.lower(), Enabled, State.lower()
def Set_Marshall(Name, Controller, Enabled, State):
    """DSC entry point: normalize the arguments, then apply Set."""
    normalized = init_vars(Name, Controller, Enabled, State)
    return Set(*normalized)
def Test_Marshall(Name, Controller, Enabled, State):
    """DSC entry point: normalize the arguments, then run Test."""
    normalized = init_vars(Name, Controller, Enabled, State)
    return Test(*normalized)
def Get_Marshall(Name, Controller, Enabled, State):
    """
    DSC entry point for Get: returns (retval, dict-of-MI-wrapped-properties).

    The locals() snapshot taken on entry captures exactly the four parameter
    names; the returned dictionary is built from that list.
    """
    arg_names = list(locals().keys())
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    retval = 0
    (retval, Name, Controller, Enabled, State, Path) = Get(
        Name, Controller, Enabled, State)
    Name = protocol.MI_String(Name)
    Controller = protocol.MI_String(Controller)
    Enabled = protocol.MI_Boolean(Enabled)
    State = protocol.MI_String(State)
    Path = protocol.MI_String(Path)
    # Fix: Path is a [read] property in the MOF schema but was computed and
    # then dropped because it is not in the initial locals() snapshot.
    arg_names.append('Path')
    retd = {}
    ld = locals()
    for k in arg_names:
        retd[k] = ld[k]
    return retval, retd
# ##########################
# Begin user defined DSC functions
# ##########################
def SetShowMof(a):
    # Module-level switch read by ShowMof(); truthy enables MOF logging.
    global show_mof
    show_mof = a
def ShowMof(op, Name, Controller, Enabled, State):
    """
    When show_mof is enabled, append a MOF rendering of this operation to
    ./test_mofs.log and the DSC log.
    """
    if not show_mof:
        return
    mof = ''
    mof += op + ' nxService MyService'
    mof += '{\n'
    mof += '    Name = "' + Name + '"\n'
    mof += '    Controller = "' + Controller + '"\n'
    mof += '    Enabled = ' + str(Enabled) + '\n'
    mof += '    State = "' + State + '"\n'
    mof += '}\n'
    # Fix: use a context manager so the log file is closed even if the
    # write raises; the original leaked the handle on error.
    with open('./test_mofs.log', 'a') as f:
        Print(mof, file=f)
    LG().Log('INFO', mof)
def Print(s, file=sys.stdout):
    # Minimal print() substitute: write s plus a newline to the stream.
    # NOTE(review): the default stream is bound once at import time, so a
    # later reassignment of sys.stdout is not picked up — confirm intended.
    file.write(s + '\n')
@contextmanager
def opened_w_error(filename, mode="r"):
    """
    Open *filename* (UTF-8 via codecs) and yield a (file, error) pair.

    On success yields (f, None) and guarantees f is closed when the
    with-block exits; on IOError yields (None, err) instead of raising.
    """
    try:
        f = codecs.open(filename, encoding='utf-8', mode=mode)
    except IOError as err:
        yield None, err
    else:
        try:
            yield f, None
        finally:
            f.close()
def RunGetOutput(cmd, no_output, chk_err=True):
    """
    Wrapper for subprocess.check_output.
    Execute 'cmd' through the shell.  Returns (returncode, stdout-text),
    or (returncode, None) when no_output is True, trapping expected
    exceptions.  Reports exceptions to stderr/log if chk_err is True.
    """
    def check_output(no_output, *popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        if no_output:
            out_file = None
        else:
            out_file = subprocess.PIPE
        # Force an English locale so output parsing elsewhere is stable.
        enEnv = os.environ.copy()
        enEnv["LANG"] = "en_US.UTF8"
        process = subprocess.Popen(stdout=out_file, env=enEnv, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output
    # Exception classes used by this module.
    class CalledProcessError(Exception):
        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output

        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" \
                % (self.cmd, self.returncode)
    # NOTE(review): this monkey-patches the subprocess module globally on
    # every call — presumably a Python 2.6 compatibility backport; confirm
    # before refactoring, as other modules may observe the patched names.
    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
    output=b''
    try:
        # shell=True: cmd is a shell command line; callers must pass
        # trusted strings only.
        output = subprocess.check_output(
            no_output, cmd, stderr=subprocess.STDOUT, shell=True)
        if output is None:
            output=b''
    except subprocess.CalledProcessError as e:
        if chk_err:
            Print('CalledProcessError.  Error Code is ' +
                  str(e.returncode), file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError.  Error Code is '
                + str(e.returncode))
            Print(
                'CalledProcessError.  Command string was '
                + e.cmd, file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError.  Command string was ' + e.cmd)
            Print('CalledProcessError.  Command result was ' +
                  (e.output[:-1]).decode('ascii','ignore'), file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError.  Command result was '
                + (e.output[:-1]).decode('ascii','ignore'))
        if no_output:
            return e.returncode, None
        else:
            return e.returncode, e.output.decode('ascii','ignore')
    if no_output:
        return 0, None
    else:
        return 0, output.decode('ascii','ignore')
# Absolute paths of the controller tools.  SystemdExists() may overwrite
# systemctl_path with the result of `which systemctl`.
systemctl_path = "/usr/bin/systemctl"
# Upstart tooling.
upstart_start_path = "/sbin/start"
upstart_stop_path = "/sbin/stop"
upstart_status_path = "/sbin/status"
# SysV init tooling (Red Hat style) ...
initd_service = "/sbin/service"
initd_service_partial = "/etc/init.d/"
initd_chkconfig = "/sbin/chkconfig"
# ... and Debian style equivalents.
initd_invokerc = "/usr/sbin/invoke-rc.d"
initd_updaterc = "/usr/sbin/update-rc.d"
lsb_install_initd = "/usr/lib/lsb/install_initd"
lsb_remove_initd = "/usr/lib/lsb/remove_initd"
runlevel_path = "/sbin/runlevel"
def ReadFile(path):
    """
    Safely attempt to read a file,
    ensuring file is always closed at exit.
    Return the data and the exception object.
    The data is None if an error occurred.
    The error is None if the data was read.
    Log results to stderr.
    """
    d = None
    error = None
    with opened_w_error(path, 'rb') as (F, error):
        if error:
            # Fix: IOError has no .message attribute on Python 3 (and it is
            # deprecated on 2.6+); str(error) renders errno and strerror.
            Print("Exception opening file " + path + " Error Code: " +
                  str(error.errno) +
                  " Error: " + str(error), file=sys.stderr)
            LG().Log('ERROR', "Exception opening file " + path +
                     " Error Code: " +
                     str(error.errno) + " Error: " + str(error))
        else:
            d = F.read()
    return d, error
def WriteFile(path, contents):
    """
    Safely attempt to write data to a file,
    replacing the existing file or creating it and
    ensuring file is always closed at exit.
    Return the exception object.
    The error is None if the data was written.
    Log results to stderr.
    """
    error = None
    with opened_w_error(path, 'wb+') as (F, error):
        if error:
            # Fix: IOError has no .message attribute on Python 3 (and it is
            # deprecated on 2.6+); str(error) renders errno and strerror.
            Print("Exception opening file " + path + " Error Code: " +
                  str(error.errno) +
                  " Error: " + str(error), file=sys.stderr)
            LG().Log('ERROR', "Exception opening file " + path +
                     " Error Code: " +
                     str(error.errno) + " Error: " + str(error))
        else:
            F.write(contents)
    return error
def Process(params, no_output=False):
    """
    Join *params* into one space-separated shell command line and run it
    via RunGetOutput.  Returns (stdout, stdout, returncode); stderr is
    merged into stdout by RunGetOutput, hence the duplication.
    """
    # Fix: the original built the line with a manual loop gated on
    # `len(spc) is 0` — identity comparison with an int; ' '.join is the
    # equivalent idiom.
    line = ' '.join(params)
    code, out = RunGetOutput(line, no_output, False)
    return (out, out, code)
def StartService(sc):
    """
    Start sc.Name using the configured controller (systemd/upstart/init).
    Returns [0] on success, [-1] on failure.
    """
    # Fix throughout: `retval != 0` instead of `retval is not 0`; identity
    # comparison with ints relies on CPython small-int caching.
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "start", sc.Name])
        if retval != 0:
            Print("Error: " + systemctl_path + " start " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " start " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        if retval != 0:
            Print("Error: " + upstart_start_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_start_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        check_state_program = initd_service
        # Debian-style init has invoke-rc.d/update-rc.d; prefer those.
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "start"])
        if retval != 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Verify the service actually came up, whatever the controller said.
    if not IsServiceRunning(sc):
        Print("Error: " + sc.Name + " start failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " start failed: " + process_stderr)
        return [-1]
    return [0]
def StopService(sc):
    """
    Stop sc.Name using the configured controller (systemd/upstart/init).
    Returns [0] on success, [-1] on failure.
    """
    # Fix throughout: `retval != 0` instead of `retval is not 0`.
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "stop", sc.Name])
        if retval != 0:
            Print("Error: " + systemctl_path + " failed: " +
                  process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + systemctl_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval != 0:
            Print("Error: " + upstart_stop_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_stop_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        check_state_program = initd_service
        # Debian-style init has invoke-rc.d/update-rc.d; prefer those.
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "stop"])
        if retval != 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Verify the service actually went down, whatever the controller said.
    if IsServiceRunning(sc):
        Print("Error: " + sc.Name + " stop failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " stop failed: " + process_stderr)
        return [-1]
    return [0]
def GetRunLevel():
    """
    Return the current runlevel as an int (second token of `runlevel`
    output), or -1 on any error.
    """
    (process_stdout, process_stderr, retval) = Process([runlevel_path])
    # Fix: `!=` / `==` instead of `is not` / `is` for int comparisons.
    if retval != 0:
        Print("Error: " + runlevel_path + " failed: " +
              process_stderr, file=sys.stderr)
        LG().Log(
            'ERROR', "Error: " + runlevel_path + " failed: " + process_stderr)
        return -1
    tokens = process_stdout.split(" ")
    if len(tokens) != 2:
        Print("Error: unexpected number of tokens from " +
              runlevel_path + ".  stdout: " + process_stdout, file=sys.stderr)
        LG().Log('ERROR', "Error: unexpected number of tokens from " +
                 runlevel_path + ".  stdout: " + process_stdout)
        return -1
    return int(tokens[1])
def DetermineInitState(stdout):
    """
    Return True when init-style status output indicates a running service.

    Recognizes the phrasings used by the various init implementations:
    "is running", "start/running", "..running", "(running)", or a bare
    "running" line.
    """
    markers = ("is running", "start/running", "..running", "(running)")
    if any(marker in stdout for marker in markers):
        return True
    return stdout.strip() == "running"
def DetermineInitEnabled(stdout, runlevel):
    """
    Parse one line of `chkconfig --list` output and report whether the
    service is enabled ("on") at *runlevel*.  Returns False on any
    format problem (logged to stderr and the DSC log).
    """
    tokens = stdout.split()
    # Drop the service name; what remains is one "<level>:<on|off>" token
    # per runlevel.
    tokens = tokens[1:]
    if runlevel > (len(tokens) - 1):
        Print("runlevel " + str(runlevel) +
              " not found in chkconfig", file=sys.stderr)
        LG().Log(
            'ERROR', "runlevel " + str(runlevel) + " not found in chkconfig")
        return False
    runlevel_tokens = tokens[runlevel].split(":")
    # Fix: `!= 2` instead of `is not 2` (int identity comparison).
    if len(runlevel_tokens) != 2:
        Print(
            "Unable to determine format for chkconfig run level",
            file=sys.stderr)
        LG().Log(
            'ERROR', "Unable to determine format for chkconfig run level")
        return False
    return runlevel_tokens[1] == "on"
def GetSystemdState(sc):
    """Return "running" or "stopped" as reported by `systemctl status`."""
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name])
    # Fix: `== 0` instead of `is 0` (int identity comparison).
    if retval == 0:
        if '(running)' in process_stdout:
            return "running"
    return "stopped"
def TestSystemdState(sc):
    """True unless a desired State is set and differs from the actual one."""
    return (not sc.State) or sc.State == GetSystemdState(sc)
def GetSystemdEnabled(sc):
    """True when `systemctl is-enabled` exits 0 for the service."""
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "is-enabled", sc.Name])
    # Fix: `== 0` instead of `is 0` (int identity comparison).
    return retval == 0
def TestSystemdEnabled(sc):
    """True when the desired Enabled flag matches the actual one."""
    # Fix: value equality instead of `is not` identity on booleans.
    return sc.Enabled == GetSystemdEnabled(sc)
def TestSystemd(sc):
    """[0] when systemd is present and both State and Enabled match; else [-1]."""
    in_desired_state = (SystemdExists()
                        and TestSystemdState(sc)
                        and TestSystemdEnabled(sc))
    return [0] if in_desired_state else [-1]
def GetUpstartState(sc):
    """Return "running"/"stopped" from `status <name>`, or "" on error."""
    (process_stdout, process_stderr, retval) = Process(
        [upstart_status_path, sc.Name])
    # Fix: `!= 0` instead of `is not 0` (int identity comparison).
    if retval != 0:
        Print("Error: " + upstart_status_path +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + upstart_status_path +
                 " failed: " + process_stderr)
        return ""
    # Upstart reports e.g. "<name> start/running, process NNN".
    if (sc.Name + " start") in process_stdout:
        return "running"
    else:
        return "stopped"
def TestUpstartState(sc):
    """True unless a desired State is set and differs from the actual one."""
    return (not sc.State) or sc.State == GetUpstartState(sc)
def GetUpstartEnabled(sc):
    """
    Determine whether an upstart service is enabled by parsing its
    /etc/init/<name>.conf stanzas.

    Returns True/False when determinable, "Complex" when the start/stop
    conditions are too involved to interpret safely, and "Error" when the
    conf file cannot be read.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        start_on_exists = False
        start_on_is_enabled = False
        stop_on_exists = False
        stop_on_is_enabled = False
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        if error is not None:
            Print(
                "Error reading:/etc/init/" + sc.Name + ".conf",
                file=sys.stderr)
            LG().Log('ERROR', "Error reading:/etc/init/" +
                     sc.Name + ".conf")
            return "Error"
        for full_line in file_lines.splitlines():
            # everything after a '#' character is a comment, so strip it off
            line = full_line.split("#")[0]
            if "start on" in line:
                start_on_exists = True
                # Compound conditions cannot be interpreted here.
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "start on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        start_on_is_enabled = True
                    else:
                        start_on_is_enabled = False
                    # "[!...]" negates the runlevel set.
                    if "!" in specified_runlevel_digits:
                        start_on_is_enabled = not start_on_is_enabled
                else:
                    return "Complex"
            if "stop on" in line:
                stop_on_exists = True
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "stop on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        stop_on_is_enabled = True
                    else:
                        stop_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        stop_on_is_enabled = not stop_on_is_enabled
                else:
                    return "Complex"
        if not start_on_exists and not stop_on_exists:  # not upstart
            if os.path.islink('/etc/init.d/' + sc.Name) and \
                    os.readlink('/etc/init.d/' + sc.Name) \
                    == '/lib/init/upstart-job':
                # this is a 'converted' init script, check the default rc2.d
                # for symlink to conf file.  if so its enabled.
                file_list = os.listdir('/etc/rc2.d')
                for f in file_list:
                    f = '/etc/rc2.d/' + f
                    if os.path.islink(f) and os.readlink(f) == \
                            "../init.d/" + sc.Name:
                        return True
                return False
            (process_stdout, process_stderr, retval) = Process(
                ['chkconfig', sc.Name, ''])  # try init style
            # Fix: `== 0` instead of `is 0` (int identity comparison).
            if retval == 0:
                if 'off' not in process_stdout:
                    return True
            return False
        if start_on_exists and start_on_is_enabled:
            if stop_on_exists and stop_on_is_enabled:
                Print("Error: Having trouble determining whether service " +
                      sc.Name + " is enabled or disabled.", file=sys.stderr)
                LG().Log('ERROR',
                         "Error: Having trouble determining whether service " +
                         sc.Name + " is enabled or disabled.")
                return "Complex"
            else:
                return True
        else:
            return False
        # NOTE(review): unreachable — every branch above returns.
        Print("Error: Unable to find line containing 'start on' in " +
              sc.Name + ".conf", file=sys.stderr)
        LG().Log('ERROR',
                 "Error: Unable to find line containing 'start on' in " +
                 sc.Name + ".conf")
        return False
    else:
        Print("Error: conf file does not exist for service named " +
              sc.Name, file=sys.stderr)
        LG().Log('ERROR',
                 "Error: conf file does not exist for service named " +
                 sc.Name)
        return False
def TestUpstartEnabled(sc):
    """
    Return the service's current upstart enabled state (True/False), or
    False when the conf file is too complex to interpret.
    """
    currently_enabled = GetUpstartEnabled(sc)
    if currently_enabled == "Complex":
        # Fix: the log message read " writeyour"; corrected to match the
        # stderr message above.
        Print("Error: Cannot modify 'Enabled' state for service " + sc.Name +
              ", conf file too complex. Please use the File provider to " +
              "write your own conf file for this service.", file=sys.stderr)
        LG().Log('ERROR', "Error: Cannot modify 'Enabled' state for service "
                 + sc.Name +
                 ", conf file too complex. Please use the File provider to " +
                 "write your own conf file for this service.")
        return False
    return currently_enabled
def TestUpstart(sc):
    """[0] when upstart is present and both State and Enabled match; else [-1]."""
    if not UpstartExists():
        return [-1]
    if not TestUpstartState(sc):
        return [-1]
    # Fix: `!=` instead of `is not` identity comparison on booleans.
    if sc.Enabled != TestUpstartEnabled(sc):
        return [-1]
    return [0]
def GetInitState(sc):
    """
    Return "running" or "stopped" for an init-controlled service, using
    `service <name> status` where available and falling back to scanning
    the process table (IsServiceRunning) otherwise.
    """
    check_state_program = initd_service_partial + sc.Name
    # debian style init.  These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        if os.path.isfile('/usr/sbin/service'):
            check_state_program = '/usr/sbin/service'
        else:  # invoke the service directly
            check_state_program = '/etc/init.d/'
    if check_state_program == '/etc/init.d/':
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program + sc.Name, "status"], True)
        # Fix: `!= 0` instead of `is not 0` (int identity comparison).
        if retval != 0:
            Print("Error: " + check_state_program +
                  sc.Name + " status failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     sc.Name + " status failed: ")
        # Output was suppressed above, so consult the process table.
        if IsServiceRunning(sc):
            return "running"
        else:
            return "stopped"
    else:
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "status"])
        if retval != 0:
            # `service status` failed; fall back to the process table.
            if IsServiceRunning(sc):
                return "running"
            else:
                return "stopped"
        if DetermineInitState(process_stdout):
            return "running"
        else:
            return "stopped"
def TestInitState(sc):
    """True unless a desired State is set and differs from the actual one."""
    return (not sc.State) or sc.State == GetInitState(sc)
def GetInitEnabled(sc):
    """
    Report whether an init-controlled service is enabled at the current
    runlevel (Debian: S??<name> symlink in /etc/rc<N>.d; Red Hat:
    `chkconfig --list`).
    """
    runlevel = GetRunLevel()
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        # A service is enabled if a symbolic link
        # exists in /etc/rc${RUNLEVEL}.d/ with the name:
        # S??${sc.Name}
        matched_files = glob.glob(
            "/etc/rc" + str(runlevel) + ".d/S??" + sc.Name)
        for f in matched_files:
            if os.path.islink(f):
                return True
        return False
    else:
        check_enabled_program = initd_chkconfig
        (process_stdout, process_stderr, retval) = Process(
            [check_enabled_program, "--list", sc.Name])
        # Fix: `!= 0` instead of `is not 0` (int identity comparison).
        if retval != 0:
            Print("Error: " + check_enabled_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_enabled_program +
                     " failed: " + process_stderr)
            return False
        return DetermineInitEnabled(process_stdout, runlevel)
def TestInitEnabled(sc):
    """Check whether the service's enabled flag matches the actual state.

    Uses '==' rather than 'is': identity comparison on booleans only
    works because CPython interns True/False, and gives wrong answers for
    equal-but-distinct values.
    """
    return sc.Enabled == GetInitEnabled(sc)
def TestInit(sc):
    """Aggregate init-controller test: tooling exists, state matches,
    and the enabled flag matches.  Returns [0] if compliant, else [-1]."""
    if not InitExists():
        return [-1]
    # Short-circuit preserves the original check order (state, then enabled).
    if TestInitState(sc) and TestInitEnabled(sc):
        return [0]
    return [-1]
def SystemdExists():
    """Probe for systemctl on PATH; cache its path in the module global.

    Returns True and sets systemctl_path when 'which systemctl' succeeds.
    """
    global systemctl_path
    code, out = RunGetOutput('which systemctl', False, False)
    # '==' not 'is': small-int identity comparison is a CPython accident.
    if code == 0:
        systemctl_path = out.strip()
        return True
    return False
def UpstartExists():
    """Detect upstart: one of its bridge daemons plus all three CLI tools
    (start/stop/status) must be present."""
    isfile = os.path.isfile
    if not (isfile('/sbin/upstart-local-bridge')
            or isfile('/sbin/upstart-udev-bridge')):
        return False
    return (isfile(upstart_start_path)
            and isfile(upstart_stop_path)
            and isfile(upstart_status_path))
def InitExists():
    """True when either Red Hat (service + chkconfig) or Debian
    (invoke-rc.d + update-rc.d) init tooling is installed."""
    isfile = os.path.isfile
    if isfile(initd_service) and isfile(initd_chkconfig):
        return True
    return isfile(initd_invokerc) and isfile(initd_updaterc)
def ServiceExistsInSystemd(sc):
    """True if systemd knows the unit.

    'systemctl status' exits non-zero for a stopped unit, so a failing
    exit code still counts as existing when the output shows the unit as
    loaded.
    """
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name])
    # '!=' not 'is not': int identity comparison is a CPython accident.
    if retval != 0:
        return "Loaded: loaded" in process_stdout
    return True
def ServiceExistsInUpstart(sc):
    """True if upstart's status tool recognizes the service name."""
    (process_stdout, process_stderr, retval) = Process(
        [upstart_status_path, sc.Name])
    # '==' not 'is': int identity comparison is a CPython accident.
    return retval == 0
def ServiceExistsInInit(sc):
    """True unless the init tooling reports the service as unknown.

    Uses invoke-rc.d on Debian-style systems, 'service' elsewhere, and
    looks for the well-known "unknown service" phrasings on stderr.
    """
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        check_state_program = initd_invokerc
    else:
        check_state_program = initd_service
    (process_stdout, process_stderr, retval) = Process(
        [check_state_program, sc.Name, "status"])
    unknown = ("unrecognized service" in process_stderr
               or "no such service" in process_stderr)
    if unknown:
        Print(process_stderr, file=sys.stderr)
        LG().Log('INFO', process_stderr)
        return False
    return True
def CreateSystemdService(sc):
Print("Error: systemd services cannot be created from the service " +
"provider. Please use the file provider to create a systemd " +
"conf file, then modify the service using this service provider.",
file=sys.stderr)
LG().Log('ERROR',
"Error: systemd services cannot be created from the service provider. \
Please use the file provider to create a systemd conf file, \
then modify the service using this service provider.")
return [-1]
def ModifySystemdService(sc):
    """Apply the requested enabled flag and run state to a systemd unit.

    Enables/disables via systemctl, then reconciles the running state by
    delegating to StartService/StopService.  Returns [0] on success.
    """
    if sc.Enabled is True:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "enable", sc.Name + '.service'])
        # '!=' not 'is not': int identity comparison is a CPython accident.
        if retval != 0:
            Print("Error: " + systemctl_path + " enable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " enable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Enabled is False:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "disable", sc.Name + '.service'])
        if retval != 0:
            Print("Error: " + systemctl_path + " disable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " disable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name + '.service'])
    # retval may be non zero even if service exists for 'status'.
    if 'No such file or directory' in process_stdout:
        Print("Error: " + systemctl_path + " status " + sc.Name +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + systemctl_path +
                 " status " + sc.Name + " failed: " + process_stderr)
        return [-1]
    if 'Active: active' in process_stdout:
        Print("Running", file=sys.stderr)
        LG().Log('INFO', "Running")
        if sc.State and sc.State != "running":
            return StopService(sc)
    else:
        Print("Stopped", file=sys.stderr)
        LG().Log('INFO', "Stopped")
        if sc.State and sc.State != "stopped":
            return StartService(sc)
    return [0]
def CreateUpstartService(sc):
    """Refuse to create an upstart job; creation is out of scope.

    Always returns [-1].  The file provider must create the conf file
    first; this provider can then modify the service.
    """
    message = ("Error: Upstart services cannot be created from the service "
               "provider. Please use the file provider to create an upstart "
               "conf file, then modify the service using this service "
               "provider.")
    Print(message, file=sys.stderr)
    LG().Log('ERROR', message)
    return [-1]
def ModifyUpstartConfFile(sc):
    """Enable or disable an upstart service.  Returns True on success.

    Three layouts are handled:
      * a native /etc/init/<name>.conf job: rewrite its start/stop stanzas;
      * an init script symlinked to /lib/init/upstart-job: manage the
        /etc/rc?.d symlink directly;
      * a plain init script: defer to update-rc.d.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        # 'not file_lines' also covers ReadFile returning None on failure,
        # which the original len(...) call would have crashed on.
        if not file_lines or error is not None:
            Print("Error: Conf file unable to be read for service " +
                  sc.Name, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Conf file unable to be read for service " +
                sc.Name)
            return False
        outfile = ""
        stanza_written = False
        for full_line in file_lines.splitlines():
            line = full_line.split("#")[0]
            is_run_stanza = "start on" in line or "stop on" in line
            # Parenthesized intent: the original relied on 'and' binding
            # tighter than 'or', so every matching line re-emitted the
            # replacement stanza instead of only the first one.
            if is_run_stanza and not stanza_written:
                # If we got to this point, we can assume that we're allowed
                # to modify the conf file. No need to check for a "Complex"
                # conf file.
                stanza_written = True
                if sc.Enabled is True:
                    outfile += "start on runlevel [2345]\n"
                    outfile += "stop on runlevel [!2345]\n"
                elif sc.Enabled is False:
                    outfile += "stop on runlevel [0123456]\n"
            elif is_run_stanza:
                continue  # extra start/stop lines are superseded
            else:
                outfile += full_line + "\n"
        if stanza_written:
            if WriteFile("/etc/init/" + sc.Name + ".conf", outfile) \
                    is not None:
                Print(
                    "Error: Unable to write conf file for service " + sc.Name,
                    file=sys.stderr)
                LG().Log(
                    'ERROR', "Error: Unable to write conf file for service " +
                    sc.Name)
                return False
            return True
        # No start/stop stanza found: fall through returning None (falsy),
        # matching the original's implicit-failure behavior.
    else:  # not an upstart service
        if os.path.islink('/etc/init.d/' + sc.Name) \
                and os.readlink('/etc/init.d/' + sc.Name) \
                == '/lib/init/upstart-job':
            # this is a 'converted' init script, check the default rc[2345].d
            # for symlink to conf file. if so its enabled.
            # NOTE(review): both branches return on the first pass, so only
            # rc2.d is ever inspected — preserved from the original.
            for rc in range(2, 6):
                file_list = os.listdir('/etc/rc' + str(rc) + '.d')
                found = False
                link = None
                for f in file_list:
                    f = '/etc/rc' + str(rc) + '.d/' + f
                    if os.path.islink(f) and os.readlink(f) \
                            == "../init.d/" + sc.Name:
                        found = True
                        link = f
                        break
                if sc.Enabled is True:
                    if not found:
                        # create the symlink
                        os.symlink(
                            "../init.d/" + sc.Name,
                            "/etc/rc2.d/S22" + sc.Name)
                    return True
                else:
                    if found:
                        os.unlink(link)
                    return True
        if sc.Enabled is True:
            # Proper argv list: the original passed ' defaults' (leading
            # space) as a single argument.
            (process_stdout, process_stderr, retval) = Process(
                ['update-rc.d', sc.Name, 'defaults'])
            if retval != 0:
                Print("Error: " + process_stdout + " enable " +
                      sc.Name + " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + process_stdout +
                         " enable " + sc.Name + " failed: " + process_stderr)
                return False
        else:
            # The original argv was ['update-rc.d -f ', name, ' remove'],
            # which tried to exec a program literally named 'update-rc.d -f '.
            (process_stdout, process_stderr, retval) = Process(
                ['update-rc.d', '-f', sc.Name, 'remove'])
            if retval != 0:
                Print("Error: " + process_stdout + " disable " +
                      sc.Name + " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + process_stdout +
                         " disable " + sc.Name + " failed: " + process_stderr)
                return False
        return True
def ModifyUpstartService(sc):
    """Bring an upstart service to the requested enabled flag and state.

    Rewrites the conf file when the enabled flag differs, then starts or
    stops the job and verifies the result.  Returns [0] on success.
    """
    # '!=' not 'is not': identity comparison only works for interned
    # True/False and silently misbehaves for other values.
    if sc.Enabled != TestUpstartEnabled(sc):
        if not ModifyUpstartConfFile(sc):
            Print("Error: Failed to modify upstart conf file", file=sys.stderr)
            LG().Log('ERROR', "Error: Failed to modify upstart conf file")
            return [-1]
    if sc.State == "running":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        if retval != 0:
            # Starting an already-running job is not an error.
            if "Job is already running" not in process_stderr:
                Print("Error: " + upstart_start_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_start_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        if not IsServiceRunning(sc):
            Print("Error: " + upstart_start_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_start_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.State == "stopped":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval != 0:
            # Stopping a job that is not running is not an error.
            if "Unknown instance" not in process_stderr:
                Print("Error: " + upstart_stop_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_stop_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        if IsServiceRunning(sc):
            Print("Error: " + upstart_stop_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_stop_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    return [0]
def CreateInitService(sc):
    """Register a new init service via the LSB install_initd tool, then
    apply the requested enabled/state settings.  Returns [0] on success."""
    (process_stdout, process_stderr, retval) = Process(
        [lsb_install_initd, sc.Name])
    # '!=' not 'is not': int identity comparison is a CPython accident.
    if retval != 0:
        Print("Error: " + lsb_install_initd + " " + sc.Name +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + lsb_install_initd +
                 " " + sc.Name + " failed: " + process_stderr)
        return [-1]
    return ModifyInitService(sc)
def ModifyInitService(sc):
    """Apply the requested enabled flag and run state to an init service.

    Tooling selection: update-rc.d on Debian-style systems (invoke-rc.d +
    update-rc.d present), chkconfig elsewhere, for the enabled flag;
    '/usr/sbin/service' or the init script itself for start/stop.
    Returns [0] on success, [-1] on failure.
    """
    check_state_program = initd_service
    check_enabled_program = initd_chkconfig
    # debian style init. These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        if os.path.isfile('/usr/sbin/service'):
            check_state_program = '/usr/sbin/service'
        else:  # invoke the service directly
            check_state_program = '/etc/init.d/'
        check_enabled_program = initd_updaterc
        if sc.Enabled is True:
            # First try 'update-rc.d -f <name> enable'; on failure fall
            # back to installing default runlevel links.
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "enable"])
            if retval is not 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " enable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " enable failed: " + process_stderr)
                # try 'defaults'
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval is not 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
                if 'already exist' in process_stdout:  # we need to remove them first
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "remove"])
                    if retval is not 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " remove failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " remove failed: " + process_stderr)
                        return [-1]
                    # it should work now
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval is not 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " defaults failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " defaults failed: " + process_stderr)
                        return [-1]
        elif sc.Enabled is False:
            # Disable, falling back to removing the runlevel links.
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "disable"])
            if retval is not 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " disable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " disable failed: " + process_stderr)
                # try remove
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval is not 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    else:
        # Red Hat style: chkconfig <name> on/off.
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "on"])
            if retval is not 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " on failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " on failed: " + process_stderr)
                # try 'defaults'
                # NOTE(review): this fallback passes update-rc.d style
                # arguments ('-f ... defaults') to chkconfig — looks like a
                # copy/paste from the debian branch; confirm intent.
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval is not 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
                if 'already exist' in process_stdout:  # we need to remove them first
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "remove"])
                    if retval is not 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " remove failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " remove failed: " + process_stderr)
                        return [-1]
                    # it should work now
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval is not 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " defaults failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " defaults failed: " + process_stderr)
                        return [-1]
        elif sc.Enabled is False:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "off"])
            if retval is not 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " off failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " off failed: " + process_stderr)
                # try remove
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval is not 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    if sc.State == "running":
        # don't try to read stdout or stderr as 'service start' comand
        # re-directs them, causing a hang in subprocess.communicate()
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "start"], True)
            if retval is not 0:
                Print("Error: " + check_state_program +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " start failed: ")
                return [-1]
        else:
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "start"], True)
            if retval is not 0:
                Print("Error: " + check_state_program + " " +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " start failed: ")
                return [-1]
        # Verify via the process table regardless of the exit code.
        if not IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " +
                  sc.Name + " start failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " start failed: ")
            return [-1]
    elif sc.State == "stopped":
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "stop"], True)
            if retval is not 0:
                Print("Error: " + check_state_program +
                      sc.Name + " stop failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " stop failed: ")
                return [-1]
        else:
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "stop"])
            if retval is not 0:
                Print("Error: " + check_state_program + " " + sc.Name +
                      " stop failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " stop failed: " + process_stderr)
                return [-1]
        if IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " + sc.Name +
                  " stop failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " stop failed: " + process_stderr)
            return [-1]
    return [0]
def IsServiceRunning(sc):
    """Best-effort check that a process for the service shows up in 'ps'.

    Sleeps briefly first to let a just-issued start/stop settle before
    inspecting the process table.
    """
    time.sleep(1)
    # Match "... <name>" or ".../<name>" followed by a space or end-of-line.
    cmd = 'ps -ef | grep -v grep | grep -E ".*( ' + \
        sc.Name + '|/' + sc.Name + ')( |$)"'
    code, out = RunGetOutput(cmd, False, False)
    # '==' not 'is not': int identity comparison is a CPython accident.
    return code == 0
def Set(Name, Controller, Enabled, State):
    """Apply the requested configuration via the selected controller.

    Modifies the service if it exists, otherwise attempts to create it.
    Returns [0] on success, [-1] on failure or unknown controller.
    """
    ShowMof('SET', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    # controller -> (tooling probe, existence check, modify, create)
    dispatch = {
        "systemd": (SystemdExists, ServiceExistsInSystemd,
                    ModifySystemdService, CreateSystemdService),
        "upstart": (UpstartExists, ServiceExistsInUpstart,
                    ModifyUpstartService, CreateUpstartService),
        "init": (InitExists, ServiceExistsInInit,
                 ModifyInitService, CreateInitService),
    }
    entry = dispatch.get(sc.Controller)
    if entry is not None:
        controller_present, service_exists, modify, create = entry
        if controller_present() is True:
            if service_exists(sc):
                return modify(sc)
            return create(sc)
    return [-1]
def Test(Name, Controller, Enabled, State):
    """Test whether the named service matches the requested configuration.

    Returns [0] when compliant, [-1] otherwise (including an unknown
    controller).
    """
    ShowMof('TEST', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    if sc.Controller == "systemd":
        return TestSystemd(sc)
    elif sc.Controller == "upstart":
        return TestUpstart(sc)
    elif sc.Controller == "init":
        return TestInit(sc)
    # Unknown controller.  (Every branch above returns, so the original's
    # second trailing 'return [-1]' was unreachable and has been dropped.)
    Print("Invalid service controller (" + sc.Controller +
          ") specified for service: " + sc.Name, file=sys.stderr)
    LG().Log('ERROR', "Invalid service controller (" +
             sc.Controller + ") specified for service: " + sc.Name)
    return [-1]
def Get(Name, Controller, Enabled, State):
    """Read back the current Enabled/State/Path of a service.

    On lookup failure the passed-in Enabled/State values are echoed back
    unchanged and the exit code is -1.
    Returns [exit_code, Name, Controller, Enabled, State, Path].
    """
    ShowMof('GET', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    Path = ""
    exit_code = 0
    if not sc.Controller:
        Print("Error: Controller not specified", file=sys.stderr)
        LG().Log('ERROR', "Error: Controller not specified")
        exit_code = -1
    elif sc.Controller == "systemd":
        if ServiceExistsInSystemd(sc):
            Enabled = GetSystemdEnabled(sc)
            State = GetSystemdState(sc)
            Path = "/usr/lib/systemd/system/" + sc.Name + ".service"
        else:
            Print("Error: Unable to find service named " +
                  sc.Name + " in systemd.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in systemd.")
            exit_code = -1
    elif sc.Controller == "upstart":
        if ServiceExistsInUpstart(sc):
            # GetUpstartEnabled may return "Complex"; treat anything but
            # False as enabled (and we won't modify it).
            Enabled = GetUpstartEnabled(sc) is not False
            State = GetUpstartState(sc)
            Path = "/etc/init/" + sc.Name + ".conf"
        else:
            Print("Error: Unable to find service named " +
                  sc.Name + " in upstart.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in upstart.")
            exit_code = -1
    elif sc.Controller == "init":
        if ServiceExistsInInit(sc):
            Enabled = GetInitEnabled(sc)
            State = GetInitState(sc)
            Path = "/etc/init.d/" + sc.Name
        else:
            Print("Error: Unable to find service named " +
                  sc.Name + " in init.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in init.")
            exit_code = -1
    return [exit_code, Name, Controller, Enabled, State, Path]
class ServiceContext:
    """Desired-state container for one managed service resource."""

    def __init__(self, Name, Controller, Enabled, State):
        # Name and controller are mandatory; fail fast with the same
        # messages, name checked first.
        for value, message in ((Name, "Error: Service has no name."),
                               (Controller, "Error: Controller not specified.")):
            if not value:
                raise Exception(message)
        self.Name = Name
        self.Controller = Controller
        self.Enabled = Enabled
        self.State = State
        self.Path = ''
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations:
"""ConnectionMonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Wire up the pipeline client, serializers, and client config."""
    # Pipeline client used to issue HTTP requests.
    self._client = client
    # msrest-style serializer/deserializer pair for request/response bodies.
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    parameters: "_models.ConnectionMonitor",
    migrate: Optional[str] = None,
    **kwargs: Any
) -> "_models.ConnectionMonitorResult":
    """Issue the initial PUT of the create-or-update long-running operation.

    Called by begin_create_or_update; not part of the public surface.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    # Map well-known status codes to azure-core exceptions; callers may
    # extend/override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if migrate is not None:
        query_parameters['migrate'] = self._serialize.query("migrate", migrate, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ConnectionMonitor')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 200 = updated an existing monitor, 201 = created a new one; both
    # carry a ConnectionMonitorResult body.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    parameters: "_models.ConnectionMonitor",
    migrate: Optional[str] = None,
    **kwargs: Any
) -> AsyncLROPoller["_models.ConnectionMonitorResult"]:
    """Create or update a connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :param parameters: Parameters that define the operation to create a connection monitor.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.ConnectionMonitor
    :param migrate: Value indicating whether connection monitor V1 should be migrated to V2 format.
    :type migrate: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ConnectionMonitorResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ConnectionMonitorResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PUT.  cls=lambda keeps the raw
        # pipeline response so the poller can drive the LRO from headers.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            parameters=parameters,
            migrate=migrate,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not leak
    # into the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new LRO.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> "_models.ConnectionMonitorResult":
    """Gets a connection monitor by name.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ConnectionMonitorResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_07_01.models.ConnectionMonitorResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    # Map well-known status codes to azure-core exceptions; callers may
    # extend/override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE of the delete long-running operation.

    Called by begin_delete; not part of the public surface.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known status codes to azure-core exceptions; callers may
    # extend/override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202 = deletion accepted (async), 204 = already gone; no body either way.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified connection monitor.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # 'polling' may be True (default AsyncARMPolling), False (no polling),
        # or a caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE. The identity lambda as
            # `cls` keeps the raw pipeline response so the poller can drive the LRO.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only applied to the initial request; remove them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the custom deserializer if supplied
            # (otherwise the poller's result is None).
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'location' final-state: the poller follows the Location header until done.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ConnectionMonitorResult":
"""Update tags of the specified connection monitor.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters supplied to update connection monitor tags.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
    async def begin_stop(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Stops the specified connection monitor.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # 'polling' may be True (default AsyncARMPolling), False (no polling),
        # or a caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial stop POST. The identity lambda as
            # `cls` keeps the raw pipeline response so the poller can drive the LRO.
            raw_result = await self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only applied to the initial request; remove them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop returns no body; only invoke the custom deserializer if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'location' final-state: the poller follows the Location header until done.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
    async def begin_start(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Starts the specified connection monitor.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # 'polling' may be True (default AsyncARMPolling), False (no polling),
        # or a caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial start POST. The identity lambda as
            # `cls` keeps the raw pipeline response so the poller can drive the LRO.
            raw_result = await self._start_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only applied to the initial request; remove them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Start returns no body; only invoke the custom deserializer if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'location' final-state: the poller follows the Location header until done.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
async def _query_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> "_models.ConnectionMonitorQueryResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
    async def begin_query(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectionMonitorQueryResult"]:
        """Query a snapshot of the most recent connection states.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name given to the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ConnectionMonitorQueryResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # 'polling' may be True (default AsyncARMPolling), False (no polling),
        # or a caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial query POST. The identity lambda as
            # `cls` keeps the raw pipeline response so the poller can drive the LRO.
            raw_result = await self._query_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only applied to the initial request; remove them so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body, honouring a custom cls if given.
            deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'location' final-state: the poller follows the Location header until done.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ConnectionMonitorListResult"]:
        """Lists all connection monitors for the specified Network Watcher.
        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ConnectionMonitorListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page uses the
            # operation's URL template; later pages use the service-supplied
            # next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string, so none is added here.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Convert one page response into (continuation_token, iterable_of_items).
            deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This list result exposes no next-link, hence the None token.
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page through the pipeline, mapping non-200 statuses to errors.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore